Update app.py

app.py CHANGED
@@ -78,7 +78,7 @@ def tiled_flux_img2img(image, prompt, strength, num_inference_steps, guidance_sc
     result = Image.new('RGB', (width, height))
     stride = tile_size - overlap

-    #
+    # Tile in both directions, handling small sizes
     for y in range(0, height, stride):
         for x in range(0, width, stride):
            tile_left = x
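For reference, the grid this loop walks can be reproduced standalone. The tile_right / tile_bottom clamping below is an assumption (those lines sit outside the hunk); the point is that edge tiles come out smaller than tile_size, which is exactly what the effective_overlap clamping in the next hunk has to handle.

# Hypothetical standalone sketch of the tile grid (clamping assumed):
width, height = 1280, 720        # example canvas size
tile_size, overlap = 512, 64
stride = tile_size - overlap     # 448: each tile re-covers 64 px of the last

for y in range(0, height, stride):
    for x in range(0, width, stride):
        tile_right = min(x + tile_size, width)    # edge tiles are cut short
        tile_bottom = min(y + tile_size, height)
        print((x, y, tile_right, tile_bottom))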
@@ -106,44 +106,38 @@ def tiled_flux_img2img(image, prompt, strength, num_inference_steps, guidance_sc
                 result.paste(generated_tile, (tile_left, tile_top))
                 continue

-            #
-            if y > 0:
-                effective_overlap = min(overlap, tile_bottom - tile_top)
+            # Vertical blend
+            if y > 0:
+                effective_overlap = min(overlap, tile_bottom - tile_top, height - tile_top)
                 if effective_overlap > 0:
                     mask = Image.new('L', (tile_right - tile_left, effective_overlap))
                     for i in range(mask.width):
                         for j in range(mask.height):
-
-                            mask.putpixel((i, j), int(255 * (j / (effective_overlap - 1 if effective_overlap > 1 else 1))))
-                    # Blend the top part of the tile with the bottom of the previous
+                            mask.putpixel((i, j), int(255 * (j / effective_overlap)))
                     blend_region = Image.composite(
                         generated_tile.crop((0, 0, mask.width, mask.height)),
                         result.crop((tile_left, tile_top, tile_right, tile_top + mask.height)),
                         mask
                     )
                     result.paste(blend_region, (tile_left, tile_top))
-                    # Paste the non-overlap part
                     result.paste(generated_tile.crop((0, effective_overlap, generated_tile.width, generated_tile.height)), (tile_left, tile_top + effective_overlap))
                 else:
                     result.paste(generated_tile, (tile_left, tile_top))

-            #
-            if x > 0:
-
-                effective_overlap_h = min(overlap, tile_right - tile_left)
+            # Horizontal blend
+            if x > 0:
+                effective_overlap_h = min(overlap, tile_right - tile_left, width - tile_left)
                 if effective_overlap_h > 0:
                     mask_h = Image.new('L', (effective_overlap_h, tile_bottom - tile_top))
                     for i in range(mask_h.width):
                         for j in range(mask_h.height):
-                            mask_h.putpixel((i, j), int(255 * (i / (effective_overlap_h - 1 if effective_overlap_h > 1 else 1))))
-                    # Blend left part
+                            mask_h.putpixel((i, j), int(255 * (i / effective_overlap_h)))
                     blend_region_h = Image.composite(
                         generated_tile.crop((0, 0, mask_h.width, mask_h.height)),
                         result.crop((tile_left, tile_top, tile_left + mask_h.width, tile_bottom)),
                         mask_h
                     )
                     result.paste(blend_region_h, (tile_left, tile_top))
-                    # Paste non-overlap
                     result.paste(generated_tile.crop((effective_overlap_h, 0, generated_tile.width, generated_tile.height)), (tile_left + effective_overlap_h, tile_top))
                 else:
                     result.paste(generated_tile, (tile_left, tile_top))
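Two things change in the blend. The ramp divisor drops the old "effective_overlap - 1 if effective_overlap > 1 else 1" guard (at the cost of never quite reaching 255 on the last row), and the overlap is additionally clamped to the space left on the canvas (height - tile_top, width - tile_left), so the mask can never be taller or wider than the region being blended. A standalone sketch of the same seam blend, using Pillow's built-in linear_gradient instead of the per-pixel putpixel loops (strip sizes here are made up):

from PIL import Image

tile_w, overlap = 512, 64
prev_strip = Image.new('RGB', (tile_w, overlap), 'red')   # bottom of the tile already on the canvas
new_strip = Image.new('RGB', (tile_w, overlap), 'blue')   # top of the freshly generated tile

# linear_gradient('L') is a 256x256 ramp, black at the top and white at the
# bottom; resized to the overlap band, it matches the putpixel ramp.
# Image.composite takes the first image where the mask is 255 and the second
# where it is 0, so the new tile fades in from top to bottom across the seam.
mask = Image.linear_gradient('L').resize((tile_w, overlap))
blended = Image.composite(new_strip, prev_strip, mask)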
@@ -164,15 +158,19 @@ def enhance_image(image, text_prompt, seed, randomize_seed, width, height, guida
     if image and (image.size[0] > MAX_IMAGE_SIZE or image.size[1] > MAX_IMAGE_SIZE):
         output_image = tiled_flux_img2img(image, prompt, strength, num_inference_steps, guidance_scale)
     else:
+        kw = {}
+        if image is not None:
+            kw['image'] = image
+            kw['strength'] = strength
+        else:
+            kw['width'] = width
+            kw['height'] = height
         output_image = pipe(
             prompt,
-            image=image,
             generator=generator,
             num_inference_steps=num_inference_steps,
-            width=width if image is None else None,
-            height=height if image is None else None,
             guidance_scale=guidance_scale,
-
+            **kw
         ).images[0]
     return output_image, prompt, seed

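The else branch now builds the pipeline call incrementally instead of passing width=None / height=None alongside image. The distinction matters in Python generally: an omitted keyword falls back to the callee's default, while an explicit None overrides it, which is a plausible source of the Space's runtime error. A toy illustration (pipe_stub is hypothetical, not the diffusers API):

def pipe_stub(prompt, width=1024, height=1024, image=None):
    return (width, height, image)

print(pipe_stub('p', width=None, height=None))   # (None, None, None): defaults lost
kw = {'width': 768}                              # only pass what this call needs
print(pipe_stub('p', **kw))                      # (768, 1024, None): other defaults kept

The same reasoning explains kw['strength']: strength is an img2img-only parameter, so it is attached only when an input image is actually present.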