import os
import torch
import spaces
import gradio as gr
from diffusers import FluxFillPipeline
import random
import numpy as np
from huggingface_hub import hf_hub_download
from PIL import Image, ImageOps
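
# Note: `spaces` and `hf_hub_download` are imported but never referenced below.
# On Hugging Face ZeroGPU hardware the GPU-bound handlers would normally also be
# wrapped with the `@spaces.GPU` decorator; loading the pipeline onto CUDA at
# import time, as done further down, presumes dedicated GPU hardware.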

CSS = """
h1 {
    margin-top: 10px
}
"""

os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
MAX_SEED = np.iinfo(np.int32).max
repo_id = "black-forest-labs/FLUX.1-Fill-dev"

if torch.cuda.is_available():
    pipe = FluxFillPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16).to("cuda")
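
# `pipe` is only created when CUDA is available, so the handlers below assume a
# GPU host; on a CPU-only machine they would fail with a NameError. A float32
# CPU fallback is possible in principle but impractically slow for this model.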

def inpaintGen(
        imgMask,
        inpaint_prompt: str,
        guidance: float,
        num_steps: int,
        seed: int,
        randomize_seed: bool,
        progress=gr.Progress(track_tqdm=True)):
    """Inpaint the masked region of the uploaded image according to the prompt."""
    # gr.ImageMask with type="filepath" yields a dict of file paths:
    # "background" is the uploaded image, "layers" holds the painted mask layer(s).
    source_path = imgMask["background"]
    mask_path = imgMask["layers"][0] if imgMask["layers"] else None

    if not source_path:
        raise gr.Error("Please upload an image.")
    if not mask_path:
        raise gr.Error("Please draw a mask on the image.")

    source_img = Image.open(source_path).convert("RGB")
    # The painted layer is RGBA: the strokes live in the alpha channel.
    mask_img = Image.open(mask_path).convert("RGBA")
    alpha_channel = mask_img.split()[3]
    # Any painted pixel (alpha > 0) becomes white (255 = fill), the rest black (0 = keep).
    binary_mask = alpha_channel.point(lambda p: 255 if p > 0 else 0)

    width, height = source_img.size
    new_width = (width // 16) * 16
    new_height = (height // 16) * 16

    # If the image size is not already divisible by 16, resize it
    # (and resize the mask with it so the two stay aligned).
    if width != new_width or height != new_height:
        source_img = source_img.resize((new_width, new_height), Image.LANCZOS)
        binary_mask = binary_mask.resize((new_width, new_height), Image.NEAREST)

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator("cpu").manual_seed(seed)

    result = pipe(
        prompt=inpaint_prompt,
        image=source_img,
        mask_image=binary_mask,
        width=new_width,
        height=new_height,
        num_inference_steps=num_steps,
        generator=generator,
        guidance_scale=guidance,
        max_sequence_length=512,
    ).images[0]

    return result, seed

def outpaintGen(
        img,
        outpaint_prompt: str,
        overlap_top: int,
        overlap_right: int,
        overlap_bottom: int,
        overlap_left: int,
        op_guidance: float,
        op_num_steps: int,
        op_seed: int,
        op_randomize_seed: bool):
    """Extend the image outward by the requested margins and fill the new border."""
    image = Image.open(img).convert("RGB")

    # Original and expanded canvas dimensions.
    original_width, original_height = image.size
    new_width = original_width + overlap_left + overlap_right
    new_height = original_height + overlap_top + overlap_bottom

    # Build the mask: start from a black canvas and mark the original image area white...
    mask_image = Image.new('RGB', (new_width, new_height), color='black')
    white_area = Image.new('RGB', (original_width, original_height), color='white')
    mask_image.paste(white_area, (overlap_left, overlap_top))

    # Place the original image on a black canvas of the expanded size.
    new_image = Image.new('RGB', (new_width, new_height), color='black')
    new_image.paste(image, (overlap_left, overlap_top))

    # ...then convert to grayscale and invert, so the new border is white
    # (area to generate) and the original image area is black (area to keep).
    mask_image = mask_image.convert('L')
    mask_image = Image.eval(mask_image, lambda x: 255 - x)

    # The pipeline works with dimensions divisible by 16; if needed, resize the
    # expanded image and mask together to the snapped size.
    fix_width = (new_width // 16) * 16
    fix_height = (new_height // 16) * 16
    if new_width != fix_width or new_height != fix_height:
        new_image = new_image.resize((fix_width, fix_height), Image.LANCZOS)
        mask_image = mask_image.resize((fix_width, fix_height), Image.NEAREST)

    if op_randomize_seed:
        op_seed = random.randint(0, MAX_SEED)
    generator = torch.Generator("cpu").manual_seed(op_seed)

    result = pipe(
        prompt=outpaint_prompt,
        image=new_image,
        mask_image=mask_image,
        width=fix_width,
        height=fix_height,
        num_inference_steps=op_num_steps,
        generator=generator,
        guidance_scale=op_guidance,
        max_sequence_length=512,
    ).images[0]

    return result, op_seed

with gr.Blocks(theme="ocean", title="Flux.1 Fill dev", css=CSS) as demo:
    gr.HTML("<h1><center>Flux.1 Fill dev</center></h1>")
    gr.HTML("""
        <p>
            <center>
            FLUX.1 Fill [dev] is a 12 billion parameter rectified flow transformer capable of filling areas in existing images based on a text description.
            </center>
        </p>
    """)
| with gr.Tab("Inpainting"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| imgMask = gr.ImageMask(type="filepath", label="Image", layers=False, height=800) | |
| inpaint_prompt = gr.Textbox(label='Prompts ✏️', placeholder="A hat...") | |
| with gr.Row(): | |
| Inpaint_sendBtn = gr.Button(value="Submit", variant='primary') | |
| Inpaint_clearBtn = gr.ClearButton([imgMask, inpaint_prompt], value="Clear") | |
| image_out = gr.Image(type="pil", label="Output", height=960) | |
| with gr.Accordion("Advanced ⚙️", open=False): | |
| guidance = gr.Slider(label="Guidance scale", minimum=1, maximum=50, value=30.0, step=0.1) | |
| num_steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=20, step=1) | |
| seed = gr.Number(label="Seed", value=42, precision=0) | |
| randomize_seed = gr.Checkbox(label="Randomize seed", value=True) | |
| gr.on( | |
| triggers = [ | |
| inpaint_prompt.submit, | |
| Inpaint_sendBtn.click, | |
| ], | |
| fn = inpaintGen, | |
| inputs = [ | |
| imgMask, | |
| inpaint_prompt, | |
| guidance, | |
| num_steps, | |
| seed, | |
| randomize_seed | |
| ], | |
| outputs = [image_out, seed] | |
| ) | |
| with gr.Tab("Outpainting"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| img = gr.Image(type="filepath", label="Image", height=800) | |
| outpaint_prompt = gr.Textbox(label='Prompts ✏️', placeholder="In city...") | |
| with gr.Row(): | |
| outpaint_sendBtn = gr.Button(value="Submit", variant='primary') | |
| outpaint_clearBtn = gr.ClearButton([img, outpaint_prompt], value="Clear") | |
| image_exp = gr.Image(type="pil", label="Output", height=960) | |
| with gr.Accordion("Advanced ⚙️", open=False): | |
| with gr.Row(): | |
| overlap_top = gr.Number(label="Top", value=64, precision=0) | |
| overlap_right = gr.Number(label="Right", value=64, precision=0) | |
| overlap_bottom = gr.Number(label="Bottom", value=64, precision=0) | |
| overlap_left = gr.Number(label="Left", value=64, precision=0) | |
| op_guidance = gr.Slider(label="Guidance scale", minimum=1, maximum=50, value=30.0, step=0.1) | |
| op_num_steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=20, step=1) | |
| op_seed = gr.Number(label="Seed", value=42, precision=0) | |
| op_randomize_seed = gr.Checkbox(label="Randomize seed", value=True) | |
| gr.on( | |
| triggers = [ | |
| outpaint_prompt.submit, | |
| outpaint_sendBtn.click, | |
| ], | |
| fn = outpaintGen, | |
| inputs = [ | |
| img, | |
| outpaint_prompt, | |
| overlap_top, | |
| overlap_right, | |
| overlap_bottom, | |
| overlap_left, | |
| op_guidance, | |
| op_num_steps, | |
| op_seed, | |
| op_randomize_seed | |
| ], | |
| outputs = [image_exp, op_seed] | |
| ) | |

if __name__ == "__main__":
    demo.launch(show_api=False, share=False)