Spaces:
Runtime error
Runtime error
lixiang46
committed on
Commit
·
20df108
1
Parent(s):
e9c3996
update
Browse files
app.py
CHANGED
|
@@ -21,19 +21,20 @@ text_encoder = ChatGLMModel.from_pretrained(f'{ckpt_dir}/text_encoder', torch_dt
|
|
| 21 |
tokenizer = ChatGLMTokenizer.from_pretrained(f'{ckpt_dir}/text_encoder')
|
| 22 |
vae = AutoencoderKL.from_pretrained(f"{ckpt_dir}/vae", revision=None).half()
|
| 23 |
scheduler = EulerDiscreteScheduler.from_pretrained(f"{ckpt_dir}/scheduler")
|
| 24 |
-
unet_t2i = UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half()
|
| 25 |
unet_i2i = unet_2d_condition.UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half()
|
| 26 |
image_encoder = CLIPVisionModelWithProjection.from_pretrained(f'{ckpt_IPA_dir}/image_encoder',ignore_mismatched_sizes=True).to(dtype=torch.float16, device=device)
|
| 27 |
ip_img_size = 336
|
| 28 |
clip_image_processor = CLIPImageProcessor(size=ip_img_size, crop_size=ip_img_size)
|
| 29 |
|
| 30 |
-
pipe_t2i = pipeline_stable_diffusion_xl_chatglm_256.StableDiffusionXLPipeline(
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
|
|
|
| 37 |
|
| 38 |
pipe_i2i = pipeline_stable_diffusion_xl_chatglm_256_ipadapter.StableDiffusionXLPipeline(
|
| 39 |
vae=vae,
|
|
@@ -49,7 +50,7 @@ pipe_i2i = pipeline_stable_diffusion_xl_chatglm_256_ipadapter.StableDiffusionXLP
|
|
| 49 |
if hasattr(pipe_i2i.unet, 'encoder_hid_proj'):
|
| 50 |
pipe_i2i.unet.text_encoder_hid_proj = pipe_i2i.unet.encoder_hid_proj
|
| 51 |
|
| 52 |
-
pipe_i2i.load_ip_adapter(
|
| 53 |
|
| 54 |
MAX_SEED = np.iinfo(np.int32).max
|
| 55 |
MAX_IMAGE_SIZE = 2048
|
|
@@ -60,7 +61,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
|
|
| 60 |
generator = torch.Generator().manual_seed(seed)
|
| 61 |
|
| 62 |
if ip_adapter_image is None:
|
| 63 |
-
image =
|
| 64 |
prompt = prompt,
|
| 65 |
negative_prompt = negative_prompt,
|
| 66 |
guidance_scale = guidance_scale,
|
|
@@ -118,10 +119,9 @@ with gr.Blocks(css=css) as demo:
|
|
| 118 |
with gr.Row():
|
| 119 |
with gr.Column(elem_id="col-left"):
|
| 120 |
with gr.Row():
|
| 121 |
-
prompt = gr.
|
| 122 |
label="Prompt",
|
| 123 |
show_label=False,
|
| 124 |
-
max_lines=1,
|
| 125 |
placeholder="Enter your prompt",
|
| 126 |
container=False,
|
| 127 |
)
|
|
@@ -129,9 +129,8 @@ with gr.Blocks(css=css) as demo:
|
|
| 129 |
with gr.Row():
|
| 130 |
ip_adapter_image = gr.Image(label="IP-Adapter Image (optional)", type="pil")
|
| 131 |
with gr.Accordion("Advanced Settings", open=False):
|
| 132 |
-
negative_prompt = gr.
|
| 133 |
label="Negative prompt",
|
| 134 |
-
max_lines=1,
|
| 135 |
placeholder="Enter a negative prompt",
|
| 136 |
visible=True,
|
| 137 |
)
|
|
|
|
| 21 |
tokenizer = ChatGLMTokenizer.from_pretrained(f'{ckpt_dir}/text_encoder')
|
| 22 |
vae = AutoencoderKL.from_pretrained(f"{ckpt_dir}/vae", revision=None).half()
|
| 23 |
scheduler = EulerDiscreteScheduler.from_pretrained(f"{ckpt_dir}/scheduler")
|
| 24 |
+
# unet_t2i = UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half()
|
| 25 |
unet_i2i = unet_2d_condition.UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half()
|
| 26 |
image_encoder = CLIPVisionModelWithProjection.from_pretrained(f'{ckpt_IPA_dir}/image_encoder',ignore_mismatched_sizes=True).to(dtype=torch.float16, device=device)
|
| 27 |
ip_img_size = 336
|
| 28 |
clip_image_processor = CLIPImageProcessor(size=ip_img_size, crop_size=ip_img_size)
|
| 29 |
|
| 30 |
+
# pipe_t2i = pipeline_stable_diffusion_xl_chatglm_256.StableDiffusionXLPipeline(
|
| 31 |
+
# vae=vae,
|
| 32 |
+
# text_encoder=text_encoder,
|
| 33 |
+
# tokenizer=tokenizer,
|
| 34 |
+
# unet=unet_t2i,
|
| 35 |
+
# scheduler=scheduler,
|
| 36 |
+
# force_zeros_for_empty_prompt=False
|
| 37 |
+
# ).to(device)
|
| 38 |
|
| 39 |
pipe_i2i = pipeline_stable_diffusion_xl_chatglm_256_ipadapter.StableDiffusionXLPipeline(
|
| 40 |
vae=vae,
|
|
|
|
| 50 |
if hasattr(pipe_i2i.unet, 'encoder_hid_proj'):
|
| 51 |
pipe_i2i.unet.text_encoder_hid_proj = pipe_i2i.unet.encoder_hid_proj
|
| 52 |
|
| 53 |
+
pipe_i2i.load_ip_adapter(f'{ckpt_IPA_dir}' , subfolder="", weight_name=["ip_adapter_plus_general.bin"])
|
| 54 |
|
| 55 |
MAX_SEED = np.iinfo(np.int32).max
|
| 56 |
MAX_IMAGE_SIZE = 2048
|
|
|
|
| 61 |
generator = torch.Generator().manual_seed(seed)
|
| 62 |
|
| 63 |
if ip_adapter_image is None:
|
| 64 |
+
image = pipe_i2i(
|
| 65 |
prompt = prompt,
|
| 66 |
negative_prompt = negative_prompt,
|
| 67 |
guidance_scale = guidance_scale,
|
|
|
|
| 119 |
with gr.Row():
|
| 120 |
with gr.Column(elem_id="col-left"):
|
| 121 |
with gr.Row():
|
| 122 |
+
prompt = gr.Textbox(
|
| 123 |
label="Prompt",
|
| 124 |
show_label=False,
|
|
|
|
| 125 |
placeholder="Enter your prompt",
|
| 126 |
container=False,
|
| 127 |
)
|
|
|
|
| 129 |
with gr.Row():
|
| 130 |
ip_adapter_image = gr.Image(label="IP-Adapter Image (optional)", type="pil")
|
| 131 |
with gr.Accordion("Advanced Settings", open=False):
|
| 132 |
+
negative_prompt = gr.Textbox(
|
| 133 |
label="Negative prompt",
|
|
|
|
| 134 |
placeholder="Enter a negative prompt",
|
| 135 |
visible=True,
|
| 136 |
)
|