comrender committed on
Commit
f6368ba
·
verified ·
1 Parent(s): 7bda99d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +442 -14
app.py CHANGED
@@ -1,3 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # Create model directories
2
  os.makedirs("ComfyUI/models/diffusion_models", exist_ok=True)
3
  os.makedirs("ComfyUI/models/clip", exist_ok=True)
@@ -29,25 +105,377 @@ esrgan_x4_path = "ComfyUI/models/upscale_models/RealESRGAN_x4.pth"
29
  if not os.path.exists(esrgan_x4_path):
30
  hf_hub_download("ai-forever/Real-ESRGAN", "RealESRGAN_x4.pth", local_dir="ComfyUI/models/upscale_models")
31
 
32
- # ...
33
-
34
  add_comfyui_directory_to_sys_path()
35
  add_extra_model_paths()
36
-
37
  from folder_paths import add_model_folder_path
38
- add_model_folder_path("checkpoints", "ComfyUI/models/diffusion_models")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
- # ...
 
 
41
 
42
- checkpointloader = NODE_CLASS_MAPPINGS["CheckpointLoaderSimple"]()
43
- checkpointloader_res = checkpointloader.load_checkpoint(
44
- ckpt_name="flux1-dev-fp8.safetensors"
 
 
 
45
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
- # ...
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
 
49
- ultimatesdupscale_50 = ultimatesdupscale.upscale(
50
- # ...
51
- model=get_value_at_index(checkpointloader_res, 0),
52
- # ...
53
- )
 
1
+ import logging
2
+ import random
3
+ import warnings
4
+ import os
5
+ import gradio as gr
6
+ import numpy as np
7
+ import spaces
8
+ import torch
9
+ from gradio_imageslider import ImageSlider
10
+ from PIL import Image
11
+ from huggingface_hub import hf_hub_download
12
+ import subprocess
13
+ import sys
14
+ import tempfile
15
+ from typing import Sequence, Mapping, Any, Union
16
+ import asyncio
17
+ import execution
18
+ from nodes import init_extra_nodes
19
+ import server
20
+
21
+ # Copy functions from FluxSimpleUpscaler.txt
22
def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Fetch element *index* from a node result.

    Sequences are indexed directly; mappings that raise KeyError for the
    integer key are assumed to wrap their payload under the "result" key
    (ComfyUI node-output convention). An IndexError is NOT caught and
    propagates to the caller.
    """
    try:
        value = obj[index]
    except KeyError:
        value = obj["result"][index]
    return value
27
+
28
def find_path(name: str, path: str = None) -> str:
    """Walk upward from *path* (default: cwd) until an entry called *name* exists.

    Returns the full path to the entry, or None once the filesystem root is
    reached without a match.
    """
    current = os.getcwd() if path is None else path
    while True:
        if name in os.listdir(current):
            located = os.path.join(current, name)
            print(f"{name} found: {located}")
            return located
        parent = os.path.dirname(current)
        if parent == current:
            # Reached the root; dirname is a fixed point there.
            return None
        current = parent
39
+
40
def add_comfyui_directory_to_sys_path() -> None:
    """Locate the ComfyUI checkout and append it to sys.path so its modules import."""
    located = find_path("ComfyUI")
    if located is None or not os.path.isdir(located):
        # Nothing to register; silently skip, matching the original behavior.
        return
    sys.path.append(located)
    print(f"'{located}' added to sys.path")
45
+
46
def add_extra_model_paths() -> None:
    """Feed extra_model_paths.yaml (if present) to ComfyUI's path-config loader.

    Newer ComfyUI versions expose load_extra_path_config from utils.extra_config
    rather than main; both locations are tried.
    """
    try:
        from main import load_extra_path_config
    except ImportError:
        print("Could not import load_extra_path_config from main.py. Looking in utils.extra_config instead.")
        from utils.extra_config import load_extra_path_config

    config_file = find_path("extra_model_paths.yaml")
    if config_file is None:
        print("Could not find the extra_model_paths config file.")
    else:
        load_extra_path_config(config_file)
57
+
58
def import_custom_nodes() -> None:
    """Bootstrap a minimal PromptServer/PromptQueue pair, then load custom nodes.

    init_extra_nodes() requires a live PromptServer instance, so one is
    constructed on a fresh event loop before custom nodes are imported.
    """
    # Local imports mirror the module-level ones; kept so the function is
    # self-contained when copied between workflow scripts.
    import asyncio
    import execution
    import server
    from nodes import init_extra_nodes

    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    prompt_server = server.PromptServer(event_loop)
    execution.PromptQueue(prompt_server)
    init_extra_nodes()
68
+
69
+ # Setup ComfyUI and custom nodes
70
+ if not os.path.exists("ComfyUI"):
71
+ subprocess.run(["git", "clone", "https://github.com/comfyanonymous/ComfyUI.git"])
72
+
73
+ custom_node_path = "ComfyUI/custom_nodes/ComfyUI_UltimateSDUpscale"
74
+ if not os.path.exists(custom_node_path):
75
+ subprocess.run(["git", "clone", "https://github.com/ssitu/ComfyUI_UltimateSDUpscale.git", custom_node_path])
76
+
77
  # Create model directories
78
  os.makedirs("ComfyUI/models/diffusion_models", exist_ok=True)
79
  os.makedirs("ComfyUI/models/clip", exist_ok=True)
 
105
  if not os.path.exists(esrgan_x4_path):
106
  hf_hub_download("ai-forever/Real-ESRGAN", "RealESRGAN_x4.pth", local_dir="ComfyUI/models/upscale_models")
107
 
108
# Add ComfyUI to path and import custom nodes
add_comfyui_directory_to_sys_path()
add_extra_model_paths()
# folder_paths is a ComfyUI module; importable only after the sys.path call above.
from folder_paths import add_model_folder_path
# Register the diffusion_models directory under the "unet" folder key so
# UNETLoader (used below in enhance_image) can find flux1-dev-fp8.safetensors.
add_model_folder_path("unet", "ComfyUI/models/diffusion_models")
import_custom_nodes()

# NODE_CLASS_MAPPINGS exposes every registered node class (built-in + custom).
from nodes import NODE_CLASS_MAPPINGS
116
+
117
+ css = """
118
+ #col-container {
119
+ margin: 0 auto;
120
+ max-width: 800px;
121
+ }
122
+ .main-header {
123
+ text-align: center;
124
+ margin-bottom: 2rem;
125
+ }
126
+ """
127
+
128
+ MAX_SEED = 1000000
129
+ MAX_PIXEL_BUDGET = 8192 * 8192
130
+
131
def make_divisible_by_16(size):
    """Round *size* to a multiple of 16, rounding up when the remainder is >= 8."""
    floored = (size // 16) * 16
    if size % 16 < 8:
        return floored
    return floored + 16
133
+
134
def process_input(input_image, upscale_factor):
    """Shrink *input_image* so the upscaled result fits within MAX_PIXEL_BUDGET.

    Returns a 4-tuple: (possibly resized image, original width,
    original height, was_resized flag). Dimensions of a resized image are
    floored to multiples of 16 with a minimum of 16.
    """
    w_original, h_original = input_image.size
    was_resized = False

    requested_pixels = w_original * h_original * upscale_factor**2
    if requested_pixels > MAX_PIXEL_BUDGET:
        gr.Info(f"Requested output image is too large. Resizing input to fit within pixel budget.")
        # Uniform scale so input_pixels * factor^2 lands on the budget.
        target_input_pixels = MAX_PIXEL_BUDGET / (upscale_factor ** 2)
        scale = (target_input_pixels / (w_original * h_original)) ** 0.5
        new_w = max(16, int(w_original * scale) // 16 * 16)
        new_h = max(16, int(h_original * scale) // 16 * 16)
        input_image = input_image.resize((new_w, new_h), resample=Image.LANCZOS)
        was_resized = True

    return input_image, w_original, h_original, was_resized
150
+
151
import requests
import io

def load_image_from_url(url):
    """Download *url* and decode it as a PIL Image.

    Raises gr.Error wrapping the underlying cause on any network or decode
    failure.
    """
    try:
        # Fully materialize the body and decode from memory: passing a
        # streamed `response.raw` to Image.open bypasses requests'
        # content-encoding (gzip/deflate) handling and can yield truncated
        # or undecodable data. A timeout keeps a dead host from hanging the UI.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        return Image.open(io.BytesIO(response.content))
    except Exception as e:
        raise gr.Error(f"Failed to load image from URL: {e}")
159
+
160
def tensor_to_pil(tensor):
    """Convert a batched float image tensor in [0, 1] to a PIL image.

    Takes batch element 0; assumes channel-last layout as produced by
    ComfyUI image nodes — TODO confirm against the caller.
    """
    arr = (tensor.cpu().clamp(0, 1) * 255).numpy().astype(np.uint8)
    return Image.fromarray(arr[0])
164
+
165
@spaces.GPU(duration=120)
def enhance_image(
    image_input,
    image_url,
    seed,
    randomize_seed,
    num_inference_steps,
    upscale_factor,
    denoising_strength,
    custom_prompt,
    tile_size,
    progress=gr.Progress(track_tqdm=True),
):
    """Run the ComfyUI FLUX + Ultimate SD Upscale workflow on one image.

    Takes either an uploaded PIL image (image_input) or a URL (image_url;
    upload wins when both are given), upscales by *upscale_factor* and
    returns [resized original, upscaled image] for the before/after slider.
    Raises gr.Error when neither input is provided.
    """
    # Uploaded image takes precedence over the URL field.
    if image_input is not None:
        true_input_image = image_input
    elif image_url:
        true_input_image = load_image_from_url(image_url)
    else:
        raise gr.Error("Please provide an image (upload or URL)")

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # May shrink the input so the output stays within MAX_PIXEL_BUDGET.
    input_image, w_original, h_original, was_resized = process_input(true_input_image, upscale_factor)

    # ESRGAN weights matched to the requested factor; x4 also covers 1x/3x.
    if upscale_factor == 2:
        upscale_model_name = "RealESRGAN_x2.pth"
    else:
        upscale_model_name = "RealESRGAN_x4.pth"

    # Persist the (possibly resized) input to disk for the LoadImage node.
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
        input_image.save(tmp.name)
        image_path = tmp.name

    with torch.inference_mode():
        # CLIP-L + T5 text encoders in FLUX dual-CLIP configuration.
        dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
        dualcliploader_res = dualcliploader.load_clip(
            clip_name1="clip_l.safetensors",
            clip_name2="t5xxl_fp8_e4m3fn.safetensors",
            type="flux",
        )
        clip = get_value_at_index(dualcliploader_res, 0)

        # Positive prompt from the user; negative is always empty (FLUX uses cfg=1).
        cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
        positive_res = cliptextencode.encode(
            text=custom_prompt,
            clip=clip
        )
        negative_res = cliptextencode.encode(
            text="",
            clip=clip
        )

        upscalemodelloader = NODE_CLASS_MAPPINGS["UpscaleModelLoader"]()
        upscalemodelloader_res = upscalemodelloader.load_model(
            model_name=upscale_model_name
        )

        vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
        vaeloader_res = vaeloader.load_vae(vae_name="ae.safetensors")

        # FP8 FLUX UNet; directory registered via add_model_folder_path("unet", ...).
        unetloader = NODE_CLASS_MAPPINGS["UNETLoader"]()
        unetloader_res = unetloader.load_unet(
            unet_name="flux1-dev-fp8.safetensors", weight_dtype="fp8_e4m3fn"
        )

        # NOTE(review): LoadImage resolves names against ComfyUI's input
        # directory, but the temp file above lives in the system temp dir —
        # verify the basename is actually found, or pass an absolute path.
        loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
        loadimage_res = loadimage.load_image(image=os.path.basename(image_path))

        # Distilled-guidance conditioning appended to the positive prompt only.
        fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
        fluxguidance_res = fluxguidance.append(
            guidance=30, conditioning=get_value_at_index(positive_res, 0)
        )

        # Tiled img2img upscale; cfg=1 because FLUX guidance is handled above.
        ultimatesdupscale = NODE_CLASS_MAPPINGS["UltimateSDUpscale"]()
        usd_res = ultimatesdupscale.upscale(
            upscale_by=upscale_factor,
            seed=seed,
            steps=num_inference_steps,
            cfg=1,
            sampler_name="euler",
            scheduler="normal",
            denoise=denoising_strength,
            mode_type="Linear",
            tile_width=tile_size,
            tile_height=tile_size,
            mask_blur=8,
            tile_padding=32,
            seam_fix_mode="None",
            seam_fix_denoise=1,
            seam_fix_width=64,
            seam_fix_mask_blur=8,
            seam_fix_padding=16,
            force_uniform_tiles=True,
            tiled_decode=False,
            image=get_value_at_index(loadimage_res, 0),
            model=get_value_at_index(unetloader_res, 0),
            positive=get_value_at_index(fluxguidance_res, 0),
            negative=get_value_at_index(negative_res, 0),
            vae=get_value_at_index(vaeloader_res, 0),
            upscale_model=get_value_at_index(upscalemodelloader_res, 0),
        )

        output_tensor = get_value_at_index(usd_res, 0)
        image = tensor_to_pil(output_tensor)

    # Remove the temp PNG; on an exception above it leaks — acceptable in /tmp.
    os.unlink(image_path)

    # Snap the result to exactly original-size * factor.
    target_w, target_h = w_original * upscale_factor, h_original * upscale_factor
    if image.size != (target_w, target_h):
        image = image.resize((target_w, target_h), resample=Image.LANCZOS)

    # NOTE(review): this second resize is a no-op — the branch above already
    # forced image.size == (target_w, target_h).
    if was_resized:
        gr.Info(f"Resizing output to target size: {target_w}x{target_h}")
        image = image.resize((target_w, target_h), resample=Image.LANCZOS)

    # Upsample the untouched original to match, for a fair before/after slider.
    resized_input = true_input_image.resize(image.size, resample=Image.LANCZOS)

    return [resized_input, image]
284
+
285
# Gradio UI: input column (upload/URL + settings) and a before/after slider.
with gr.Blocks(css=css, title="🎨 AI Image Upscaler - FLUX ComfyUI") as demo:
    gr.HTML("""
    <div class="main-header">
        <h1>🎨 AI Image Upscaler (ComfyUI Workflow)</h1>
        <p>Upload an image or provide a URL to upscale it using FLUX FP8 with ComfyUI Ultimate SD Upscale</p>
        <p>Using FLUX.1-dev FP8 model</p>
    </div>
    """)

    with gr.Row():
        with gr.Column(scale=1):
            gr.HTML("<h3>📤 Input</h3>")

            # Two input modes; enhance_image prefers the upload when both are set.
            with gr.Tabs():
                with gr.TabItem("📁 Upload Image"):
                    input_image = gr.Image(
                        label="Upload Image",
                        type="pil",
                        height=200
                    )

                with gr.TabItem("🔗 Image URL"):
                    image_url = gr.Textbox(
                        label="Image URL",
                        placeholder="https://example.com/image.jpg",
                        value="https://upload.wikimedia.org/wikipedia/commons/thumb/a/a7/Example.jpg/800px-Example.jpg"
                    )

            gr.HTML("<h3>🎛️ Prompt Settings</h3>")

            custom_prompt = gr.Textbox(
                label="Custom Prompt (optional)",
                placeholder="Enter custom prompt or leave empty",
                lines=2
            )

            gr.HTML("<h3>⚙️ Upscaling Settings</h3>")

            upscale_factor = gr.Slider(
                label="Upscale Factor",
                minimum=1,
                maximum=4,
                step=1,
                value=2,
                info="How much to upscale the image"
            )

            num_inference_steps = gr.Slider(
                label="Number of Inference Steps",
                minimum=1,
                maximum=50,
                step=1,
                value=25,
                info="More steps = better quality but slower"
            )

            denoising_strength = gr.Slider(
                label="Denoising Strength",
                minimum=0.0,
                maximum=1.0,
                step=0.05,
                value=0.3,
                info="Controls how much the image is transformed"
            )

            tile_size = gr.Slider(
                label="Tile Size",
                minimum=256,
                maximum=2048,
                step=64,
                value=1024,
                info="Size of tiles for processing (larger = faster but more memory)"
            )

            with gr.Row():
                randomize_seed = gr.Checkbox(
                    label="Randomize seed",
                    value=True
                )
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=42,
                    interactive=True
                )

            enhance_btn = gr.Button(
                "🚀 Upscale Image",
                variant="primary",
                size="lg"
            )

        with gr.Column(scale=2):
            gr.HTML("<h3>📊 Results</h3>")

            # Before/after comparison widget; fed [resized original, upscaled].
            result_slider = ImageSlider(
                type="pil",
                interactive=False,
                height=600,
                elem_id="result_slider",
                label=None
            )

    # Wire the button to the GPU handler; input order must match the
    # enhance_image signature.
    enhance_btn.click(
        fn=enhance_image,
        inputs=[
            input_image,
            image_url,
            seed,
            randomize_seed,
            num_inference_steps,
            upscale_factor,
            denoising_strength,
            custom_prompt,
            tile_size
        ],
        outputs=[result_slider]
    )

    gr.HTML("""
    <div style="margin-top: 2rem; padding: 1rem; background: #f0f0f0; border-radius: 8px;">
        <p><strong>Note:</strong> This upscaler uses the Flux.1-dev model. Users are responsible for obtaining commercial rights if used commercially under their license.</p>
    </div>
    """)

    # Extra CSS for the slider widget: hide Gradio's tool buttons and overlay
    # "Before"/"After" badges on the two image halves.
    gr.HTML("""
    <style>
    #result_slider .slider {
        width: 100% !important;
        max-width: inherit !important;
    }
    #result_slider img {
        object-fit: contain !important;
        width: 100% !important;
        height: auto !important;
    }
    #result_slider .gr-button-tool {
        display: none !important;
    }
    #result_slider .gr-button-undo {
        display: none !important;
    }
    #result_slider .gr-button-clear {
        display: none !important;
    }
    #result_slider .badge-container .badge {
        display: none !important;
    }
    #result_slider .badge-container::before {
        content: "Before";
        position: absolute;
        top: 10px;
        left: 10px;
        background: rgba(0,0,0,0.5);
        color: white;
        padding: 5px;
        border-radius: 5px;
        z-index: 10;
    }
    #result_slider .badge-container::after {
        content: "After";
        position: absolute;
        top: 10px;
        right: 10px;
        background: rgba(0,0,0,0.5);
        color: white;
        padding: 5px;
        border-radius: 5px;
        z-index: 10;
    }
    #result_slider .fullscreen img {
        object-fit: contain !important;
        width: 100vw !important;
        height: 100vh !important;
        position: absolute;
        top: 0;
        left: 0;
    }
    </style>
    """)

    # Center the comparison handle at 50% on load.
    # NOTE(review): scripts injected via gr.HTML may be sanitized by Gradio —
    # confirm this actually executes in the deployed Space.
    gr.HTML("""
    <script>
    document.addEventListener('DOMContentLoaded', function() {
        const sliderInput = document.querySelector('#result_slider input[type="range"]');
        if (sliderInput) {
            sliderInput.value = 50;
            sliderInput.dispatchEvent(new Event('input'));
        }
    });
    </script>
    """)
479
 
480
if __name__ == "__main__":
    # queue() enables request queuing required by the @spaces.GPU handler.
    # NOTE(review): share=True is typically unnecessary when already binding
    # 0.0.0.0 on a hosted Space — confirm it's intended here.
    demo.queue().launch(share=True, server_name="0.0.0.0", server_port=7860)