Spaces: Running on Zero
bugfix
Browse files
- app.py +27 -18
- requirements.txt +7 -2
app.py CHANGED

@@ -163,12 +163,16 @@ def run_flux(
     progress
 ) -> Image.Image:
     print("Running FLUX...")
+    clear_cuda_cache()
+    # pipe.enable_vae_slicing()
+    # pipe.enable_vae_tiling()
+    pipe.enable_model_cpu_offload()
+
     if lora_path and lora_weights:
         with calculateDuration("load lora"):
             print("start to load lora", lora_path, lora_weights)
-            pipe.unload_lora_weights()
             pipe.load_lora_weights(lora_path, weight_name=lora_weights)
-
+
     width, height = resolution_wh
     if randomize_seed_checkbox:
         seed_slicer = random.randint(0, MAX_SEED)
@@ -176,25 +180,30 @@ def run_flux(
 
     with calculateDuration("run pipe"):
         print("start to run pipe", prompt)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+        with torch.inference_mode():
+            generated_image = pipe(
+                prompt=prompt,
+                image=image,
+                mask_image=mask,
+                control_image=control_image,
+                control_mode=control_mode,
+                controlnet_conditioning_scale=0.85,
+                width=width,
+                height=height,
+                strength=strength_slider,
+                generator=generator,
+                num_inference_steps=num_inference_steps_slider,
+                # max_sequence_length=256,
+                cross_attention_kwargs={"scale": 0.5},
+                joint_attention_kwargs={"scale": lora_scale}
+            ).images[0]
     progress(99, "Generate image success!")
     return generated_image
 
 @spaces.GPU(duration=120)
+@torch.inference_mode()
+
 def process(
     image_url: str,
     mask_url: str,
@@ -315,7 +324,7 @@ def process(
         result["message"] = "generate image failed"
         print(e)
         generated_image = None
-
+    clear_cuda_cache()
     print("run flux finish")
     if generated_image:
         if upload_to_r2:
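The commit calls a clear_cuda_cache() helper (once before configuring the pipeline, once after a failed generation) whose definition lives elsewhere in app.py and is not part of this diff. A minimal sketch of what such a helper usually looks like, offered as an assumption rather than the Space's actual code:

import gc

import torch


def clear_cuda_cache():
    # Hypothetical stand-in for the helper used in the diff: drop dead
    # Python references first, then return cached CUDA blocks to the
    # driver so the next run starts with as much free VRAM as possible.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

Together with pipe.enable_model_cpu_offload(), a diffusers call that keeps each submodule on the CPU and moves it to the GPU only while it executes, this trades some speed for a lower peak VRAM footprint.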
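torch.inference_mode() is applied here both as a context manager around the restored pipe() call and as a decorator on process(). Both forms switch autograd recording off, so no computation graph or activation buffers are kept alive during generation. A minimal standalone illustration with a toy function (not from the Space):

import torch

@torch.inference_mode()
def double(x: torch.Tensor) -> torch.Tensor:
    # No autograd graph is recorded in here, so intermediate buffers
    # are freed immediately and the output does not require grad.
    return x * 2

x = torch.ones(3, requires_grad=True)
print(double(x).requires_grad)  # False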
requirements.txt CHANGED

@@ -1,6 +1,7 @@
 accelerate
 torch==2.0.1
-
+torchvision==0.15.2
+transformers==4.43.3
 tqdm
 einops
 spaces
@@ -13,4 +14,8 @@ peft
 controlnet-aux
 mediapipe
 kornia
-xformers
+xformers
+einops
+onnxruntime-gpu
+omegaconf
+scipy
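torchvision 0.15.2 is the release that pairs with the existing torch==2.0.1 pin, and pinning transformers to 4.43.3 keeps model loading on a known-good version; leaving these floating is a common cause of Space build breakage. Note that einops now appears twice (once in each block); pip tolerates duplicate identical requirements, so the build still resolves. A quick post-install sanity check, as a sketch:

import torch, torchvision, transformers

# Confirm the pinned stack resolved as expected after
# `pip install -r requirements.txt`.
print(torch.__version__)         # expect 2.0.1 (possibly with a +cu suffix)
print(torchvision.__version__)   # expect 0.15.2
print(transformers.__version__)  # expect 4.43.3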