push
app.py +10 -5
attention_mask_vis.png +0 -0
app.py CHANGED

@@ -62,13 +62,16 @@ pipe.load_lora_weights(
 pipe.load_lora_weights("xey/sldr_flux_nsfw_v2-studio", weight_name="sldr_flux_nsfw_v2-studio.safetensors", adapter_name="sldr")
 pipe.set_adapters(["hyper-sd", "hina", "sf5", "sldr"], adapter_weights=[0.25, 0.5, 0.25, 0.25])
 pipe.fuse_lora(lora_scale=0.8)
-pipe.unload_lora_weights()
 
 pipe.transformer.to(torch.bfloat16)
 pipe.controlnet.to(torch.bfloat16)
+pipe.push_to_hub("FLUX.1-Inpainting-8step_uncensored", private=True, token=HF_TOKEN)
+
+# pipe.unload_lora_weights()
+
 # pipe.to("cuda")
 pipe.enable_model_cpu_offload()
-
+print(pipe.hf_device_map)
 
 def create_mask_from_editor(editor_value):
     """
@@ -158,7 +161,7 @@ def inpaint_image(image, prompt, subject, editor_value):
 
     subject_name=subject
     target_text_prompt=prompt
-    prompt_final=f'A two side-by-side image of
+    prompt_final=f'A two side-by-side image of {subject_name}. LEFT: a photo of {subject_name}; RIGHT: a photo of {subject_name} {target_text_prompt}.'
 
     # Convert attention mask to PIL image format
     # Take first head's mask after prompt tokens (shape is now H*W x H*W)
@@ -179,7 +182,7 @@ def inpaint_image(image, prompt, subject, editor_value):
         control_mask=mask,
         num_inference_steps=8,
         generator=generator,
-        controlnet_conditioning_scale=0.
+        controlnet_conditioning_scale=0.6,
         guidance_scale=3.5,
         negative_prompt="",
         true_guidance_scale=1.0,
@@ -195,8 +198,10 @@ with gr.Blocks() as iface:
     with gr.Row():
         with gr.Column():
             with gr.Row():
-
+                with gr.Accordion():
+                    input_image = gr.Image(type="filepath", label="Upload Image")
             with gr.Row():
+                prompt_preview = gr.Textbox(value="A two side-by-side image of 'subject_name'. LEFT: a photo of 'subject_name'; RIGHT: a photo of 'subject_name' 'target_text_prompt'", interactive=False)
                 subject = gr.Textbox(lines=1, placeholder="Enter your subject", label="Subject")
                 prompt = gr.Textbox(lines=2, placeholder="Enter your prompt here (e.g., 'wearing a christmas hat, in a busy street')", label="Prompt")
         with gr.Column():
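For context, the added pipe.push_to_hub(...) line in the first hunk snapshots the fused pipeline to a private Hub repo before offloading is enabled. Below is a minimal standalone sketch of that load/fuse/push flow; the base checkpoint name and the HF_TOKEN environment variable are assumptions, since the rest of app.py is not part of this diff, and only the "sldr" adapter from the visible context is used.

import os
import torch
from diffusers import FluxPipeline  # stand-in; app.py builds a ControlNet inpaint pipeline

HF_TOKEN = os.environ.get("HF_TOKEN")  # assumed to come from the Space's secrets

# Assumed base checkpoint; the real one is defined earlier in app.py (not shown in this diff).
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)

# Same pattern as the diff: load a LoRA under a named adapter, weight it, then bake it in.
pipe.load_lora_weights(
    "xey/sldr_flux_nsfw_v2-studio",
    weight_name="sldr_flux_nsfw_v2-studio.safetensors",
    adapter_name="sldr",
)
pipe.set_adapters(["sldr"], adapter_weights=[0.25])
pipe.fuse_lora(lora_scale=0.8)

# Push the fused pipeline to a private Hub repo, as the commit adds.
pipe.push_to_hub("FLUX.1-Inpainting-8step_uncensored", private=True, token=HF_TOKEN)

# Offload submodules to CPU between forward passes instead of pipe.to("cuda").
pipe.enable_model_cpu_offload()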
attention_mask_vis.png ADDED
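The diff context also references a create_mask_from_editor(editor_value) helper whose body is not shown in this commit. A hypothetical sketch is below, assuming editor_value is the dict a gr.ImageEditor produces ("background"/"layers"/"composite") and that painted strokes should become white in a binary PIL mask; the actual implementation in app.py may differ.

import numpy as np
from PIL import Image

def create_mask_from_editor(editor_value):
    # Hypothetical sketch: the real body is not part of this commit's diff.
    layers = (editor_value or {}).get("layers") or []
    if not layers:
        # Nothing painted: return an all-black mask the size of the background.
        background = np.asarray(editor_value["background"])
        return Image.fromarray(np.zeros(background.shape[:2], dtype=np.uint8))
    # Each layer is an RGBA image; any non-transparent pixel counts as masked.
    mask = np.zeros(np.asarray(layers[0]).shape[:2], dtype=np.uint8)
    for layer in layers:
        alpha = np.asarray(layer)[:, :, 3]
        mask[alpha > 0] = 255
    return Image.fromarray(mask)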