Update app.py
app.py CHANGED
@@ -14,6 +14,7 @@ import sys
 import tempfile
 from typing import Sequence, Mapping, Any, Union
 import asyncio
+import shutil
 
 # Copy functions from FluxSimpleUpscaler.txt
 def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
@@ -61,7 +62,7 @@ def import_custom_nodes() -> None:
     asyncio.set_event_loop(loop)
     server_instance = server.PromptServer(loop)
     execution.PromptQueue(server_instance)
-    init_extra_nodes()
+    loop.run_until_complete(init_extra_nodes())
 
 # Setup ComfyUI and custom nodes
 if not os.path.exists("ComfyUI"):
@@ -80,6 +81,7 @@ os.makedirs("ComfyUI/models/diffusion_models", exist_ok=True)
 os.makedirs("ComfyUI/models/clip", exist_ok=True)
 os.makedirs("ComfyUI/models/vae", exist_ok=True)
 os.makedirs("ComfyUI/models/upscale_models", exist_ok=True)
+os.makedirs("ComfyUI/input", exist_ok=True)
 
 # Download models if not present
 diffusion_path = "ComfyUI/models/diffusion_models/flux1-dev-fp8.safetensors"
@@ -195,7 +197,12 @@ def enhance_image(
 
     with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
         input_image.save(tmp.name)
-
+        temp_path = tmp.name
+
+    image_base = os.path.basename(temp_path)
+    input_dir = find_path("input")
+    input_image_path = os.path.join(input_dir, image_base)
+    shutil.copy(temp_path, input_image_path)
 
     with torch.inference_mode():
         dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
@@ -230,7 +237,7 @@ def enhance_image(
         )
 
         loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
-        loadimage_res = loadimage.load_image(image=
+        loadimage_res = loadimage.load_image(image=image_base)
 
         fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
         fluxguidance_res = fluxguidance.append(
@@ -269,7 +276,8 @@ def enhance_image(
         output_tensor = get_value_at_index(usd_res, 0)
         image = tensor_to_pil(output_tensor)
 
-    os.unlink(
+    os.unlink(input_image_path)
+    os.unlink(temp_path)
 
     target_w, target_h = w_original * upscale_factor, h_original * upscale_factor
     if image.size != (target_w, target_h):
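The import_custom_nodes() change swaps the bare init_extra_nodes() call for loop.run_until_complete(init_extra_nodes()). A minimal sketch of that pattern follows; it assumes init_extra_nodes is an async coroutine in the ComfyUI version this Space uses (an inference from the diff, not verified) and substitutes a stand-in coroutine for ComfyUI's real initializer.

# Sketch only: init_extra_nodes here is a hypothetical stand-in for ComfyUI's
# initializer, assumed (from the diff) to be an async coroutine. Calling a
# coroutine function without awaiting it only creates a coroutine object, so
# the extra nodes would never actually load; run_until_complete() drives it
# to completion on the loop created for PromptServer.
import asyncio

async def init_extra_nodes() -> None:
    await asyncio.sleep(0)  # placeholder for the real async node loading

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(init_extra_nodes())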
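The remaining hunks stage the uploaded image through ComfyUI's input directory: the temp PNG is copied into the folder that find_path("input") resolves to, LoadImage receives only the basename (it looks files up inside that input directory), and both copies are deleted after the upscale finishes. Below is a self-contained sketch of that staging/cleanup pattern; it assumes Pillow is installed, hard-codes ComfyUI/input in place of find_path("input"), and elides the actual ComfyUI workflow.

# Staging/cleanup sketch. INPUT_DIR is an assumed stand-in for the directory
# find_path("input") returns in the Space; the workflow call itself is elided.
import os
import shutil
import tempfile

from PIL import Image

INPUT_DIR = "ComfyUI/input"  # LoadImage resolves bare filenames against this

def stage_for_loadimage(input_image: Image.Image) -> tuple[str, str]:
    """Save the image to a temp PNG, copy it into INPUT_DIR, and return
    (basename to pass to LoadImage, temp path) so both can be removed later."""
    os.makedirs(INPUT_DIR, exist_ok=True)
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
        input_image.save(tmp.name)
        temp_path = tmp.name
    image_base = os.path.basename(temp_path)
    shutil.copy(temp_path, os.path.join(INPUT_DIR, image_base))
    return image_base, temp_path

def cleanup(image_base: str, temp_path: str) -> None:
    """Remove both copies once the workflow has consumed the image."""
    os.unlink(os.path.join(INPUT_DIR, image_base))
    os.unlink(temp_path)

if __name__ == "__main__":
    name, tmp_path = stage_for_loadimage(Image.new("RGB", (64, 64), "white"))
    # ...loadimage.load_image(image=name) would run inside the workflow here...
    cleanup(name, tmp_path)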