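"""Gradio demo for image editing with FLUX.1 Kontext [dev].

Upload an image plus a text instruction and the app runs FluxKontextPipeline
on a GPU (requested on demand through the `spaces` decorator) to produce the
edited image. The demo is launched with an MCP server so the endpoint can
also be called as a tool.
"""
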
import gradio as gr
import numpy as np
import spaces
import torch
import random
from PIL import Image

from diffusers import FluxKontextPipeline
from diffusers.utils import load_image

# Default to 20 inference steps to keep a run under ~30 seconds, so it
# generally completes within claude.ai's request timeout (which is not
# reset by progress notifications).

MAX_SEED = np.iinfo(np.int32).max

pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
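# NOTE: FLUX.1 Kontext [dev] is a gated model on the Hugging Face Hub; this
# load assumes access has been granted and that the GPU can hold the bf16 weights.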

@spaces.GPU
def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=20, progress=gr.Progress(track_tqdm=True)):
    """
    Perform image editing using the FLUX.1 Kontext pipeline.
    
    This function takes an input image and a text prompt to generate a modified version
    of the image based on the provided instructions. It uses the FLUX.1 Kontext model
    for contextual image editing tasks.
    
    Args:
        input_image (PIL.Image.Image): The input image to be edited.
        prompt (str): Text description of the desired edit to apply to the image. Examples: "Remove glasses", "Add a hat", "Change background to beach".
        seed (int, optional): Random seed for reproducible generation. 
            Must be between 0 and MAX_SEED (2^31 - 1). Defaults to 42.
        randomize_seed (bool, optional): If True, generates a random seed instead of using the provided seed value. 
            Defaults to False.
        guidance_scale (float, optional): Controls how closely the model follows the prompt. Higher values mean stronger adherence to the prompt but may reduce image quality. Range: 1.0-10.0. Defaults to 2.5.
        steps (int, optional): Controls how many steps to run the diffusion model for.
            Range: 1-30. Defaults to 20.
        progress (gr.Progress, optional): Gradio progress tracker for monitoring
            generation progress. Defaults to gr.Progress(track_tqdm=True).
    
    Returns:
        A tuple of (edited image, seed used for generation, and a Gradio
        update that makes the "Reuse this image" button visible).
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    
    if input_image is not None:
        input_image = input_image.convert("RGB")
        image = pipe(
            image=input_image,
            prompt=prompt,
            guidance_scale=guidance_scale,
            width=input_image.size[0],
            height=input_image.size[1],
            num_inference_steps=steps,
            generator=torch.Generator().manual_seed(seed),
        ).images[0]
    else:
        image = pipe(
            prompt=prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=steps,
            generator=torch.Generator().manual_seed(seed),
        ).images[0]
    return image, seed, gr.Button(visible=True)

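# Thin wrapper used for cached examples: it drops the reuse-button update so
# its return values match the two outputs (result, seed) wired up in
# gr.Examples below.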
@spaces.GPU(duration=25)
def infer_example(input_image, prompt):
    image, seed, _ = infer(input_image, prompt)
    return image, seed

css="""
#col-container {
    margin: 0 auto;
    max-width: 960px;
}
"""

with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
    
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""# FLUX.1 Kontext [dev]
Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro], [[blog]](https://bfl.ai/announcements/flux-1-kontext-dev) [[model]](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev)
        """)
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Upload the image for editing", type="pil")
                with gr.Row():
                    prompt = gr.Text(
                        label="Prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter your prompt for editing (e.g., 'Remove glasses', 'Add a hat')",
                        container=False,
                    )
                    run_button = gr.Button("Run", scale=0)
                with gr.Accordion("Advanced Settings", open=False):
                    
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=0,
                    )
                    
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                    
                    guidance_scale = gr.Slider(
                        label="Guidance Scale",
                        minimum=1,
                        maximum=10,
                        step=0.1,
                        value=2.5,
                    )       
                    
                    steps = gr.Slider(
                        label="Steps",
                        minimum=1,
                        maximum=30,
                        value=20,
                        step=1
                    )
                    
            with gr.Column():
                result = gr.Image(label="Result", show_label=False, interactive=False)
                reuse_button = gr.Button("Reuse this image", visible=False)
        
            
        examples = gr.Examples(
            examples=[
                ["flowers.png", "turn the flowers into sunflowers"],
                ["monster.png", "make this monster ride a skateboard on the beach"],
                ["cat.png", "make this cat happy"]
            ],
            inputs=[input_image, prompt],
            outputs=[result, seed],
            fn=infer_example,
            cache_examples="lazy"
        )
            
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[input_image, prompt, seed, randomize_seed, guidance_scale, steps],
        outputs=[result, seed, reuse_button],
    )
    
    # Copy the result back into the input slot so it can be edited again;
    # infer's third return value reveals this button once a result exists.
    reuse_button.click(
        fn=lambda image: image,
        inputs=[result],
        outputs=[input_image],
    )

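# mcp_server=True additionally exposes the app's endpoints, described by their
# docstrings, as MCP tools (which is how a client such as claude.ai can call infer).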
demo.launch(mcp_server=True)
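
# A minimal client-side sketch, included for illustration only: it assumes the
# app is reachable at the default local URL and that `gradio_client` is installed.
#
#   from gradio_client import Client, handle_file
#
#   client = Client("http://127.0.0.1:7860/")
#   edited_image, used_seed, _ = client.predict(
#       input_image=handle_file("flowers.png"),
#       prompt="turn the flowers into sunflowers",
#       api_name="/infer",
#   )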