Nymbo committed
Commit 3a22768 · verified · Parent(s): 85e06d1

Update app.py

Files changed (1):
  1. app.py (+41 -35)
app.py CHANGED
@@ -8,12 +8,14 @@ from PIL import Image
 from diffusers import FluxKontextPipeline
 from diffusers.utils import load_image
 
+# down to 20 steps to try and keep this ~<30 seconds so it will generally work in claude.ai - which doesn't reset timeout with notifications.
+
 MAX_SEED = np.iinfo(np.int32).max
 
 pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
 
 @spaces.GPU
-def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
+def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=20, progress=gr.Progress(track_tqdm=True)):
     """
     Perform image editing using the FLUX.1 Kontext pipeline.
 
@@ -22,36 +24,20 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
     for contextual image editing tasks.
 
     Args:
-        input_image (PIL.Image.Image): The input image to be edited. Will be converted
-            to RGB format if not already in that format.
-        prompt (str): Text description of the desired edit to apply to the image.
-            Examples: "Remove glasses", "Add a hat", "Change background to beach".
-        seed (int, optional): Random seed for reproducible generation. Defaults to 42.
-            Must be between 0 and MAX_SEED (2^31 - 1).
-        randomize_seed (bool, optional): If True, generates a random seed instead of
-            using the provided seed value. Defaults to False.
-        guidance_scale (float, optional): Controls how closely the model follows the
-            prompt. Higher values mean stronger adherence to the prompt but may reduce
-            image quality. Range: 1.0-10.0. Defaults to 2.5.
+        input_image (PIL.Image.Image): The path to the input image to be edited.
+        prompt (str): Text description of the desired edit to apply to the image. Examples: "Remove glasses", "Add a hat", "Change background to beach".
+        seed (int, optional): Random seed for reproducible generation.
+            Must be between 0 and MAX_SEED (2^31 - 1). Defaults to 42.
+        randomize_seed (bool, optional): If True, generates a random seed instead of using the provided seed value.
+            Defaults to False.
+        guidance_scale (float, optional): Controls how closely the model follows the prompt. Higher values mean stronger adherence to the prompt but may reduce image quality. Range: 1.0-10.0. Defaults to 2.5.
         steps (int, optional): Controls how many steps to run the diffusion model for.
-            Range: 1-30. Defaults to 28.
+            Range: 1-30. Defaults to 20.
         progress (gr.Progress, optional): Gradio progress tracker for monitoring
             generation progress. Defaults to gr.Progress(track_tqdm=True).
 
     Returns:
-        tuple: A 3-tuple containing:
-            - PIL.Image.Image: The generated/edited image
-            - int: The seed value used for generation (useful when randomize_seed=True)
-            - gr.update: Gradio update object to make the reuse button visible
-
-        Example:
-            >>> edited_image, used_seed, button_update = infer(
-            ...     input_image=my_image,
-            ...     prompt="Add sunglasses",
-            ...     seed=123,
-            ...     randomize_seed=False,
-            ...     guidance_scale=2.5
-            ... )
+        The modified image and seed used for generation.
     """
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
@@ -62,6 +48,8 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
             image=input_image,
             prompt=prompt,
             guidance_scale=guidance_scale,
+            width = input_image.size[0],
+            height = input_image.size[1],
             num_inference_steps=steps,
             generator=torch.Generator().manual_seed(seed),
         ).images[0]
@@ -72,7 +60,12 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
             num_inference_steps=steps,
             generator=torch.Generator().manual_seed(seed),
         ).images[0]
-    return image, seed, gr.update(visible=True)
+    return image, seed, gr.Button(visible=True)
+
+@spaces.GPU(duration=25)
+def infer_example(input_image, prompt):
+    image, seed, _ = infer(input_image, prompt)
+    return image, seed
 
 css="""
 #col-container {
@@ -85,7 +78,7 @@ with gr.Blocks(css=css) as demo:
 
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""# FLUX.1 Kontext [dev]
-Image editing and manipulation model.
+Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro], [[blog]](https://bfl.ai/announcements/flux-1-kontext-dev) [[model]](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev)
 """)
         with gr.Row():
             with gr.Column():
@@ -123,7 +116,7 @@ Image editing and manipulation model.
                 label="Steps",
                 minimum=1,
                 maximum=30,
-                value=28,
+                value=20,
                 step=1
             )
 
@@ -131,17 +124,30 @@ Image editing and manipulation model.
                 result = gr.Image(label="Result", show_label=False, interactive=False)
                 reuse_button = gr.Button("Reuse this image", visible=False)
 
-
+
+    examples = gr.Examples(
+        examples=[
+            ["flowers.png", "turn the flowers into sunflowers"],
+            ["monster.png", "make this monster ride a skateboard on the beach"],
+            ["cat.png", "make this cat happy"]
+        ],
+        inputs=[input_image, prompt],
+        outputs=[result, seed],
+        fn=infer_example,
+        cache_examples="lazy"
+    )
+
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn = infer,
         inputs = [input_image, prompt, seed, randomize_seed, guidance_scale, steps],
        outputs = [result, seed, reuse_button]
    )
+
+    # reuse_button.click(
+    #     fn = lambda image: image,
+    #     inputs = [result],
+    #     outputs = [input_image]
+    # )
 
 demo.launch(mcp_server=True)
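
Because the app is launched with mcp_server=True, its endpoints are also reachable programmatically (by MCP clients such as claude.ai, which is why the step count was reduced, and by gradio_client). Below is a minimal sketch of calling the edit endpoint from Python; the Space id is a placeholder, and the endpoint name "/infer" assumes Gradio's default naming for the infer function.

# Minimal sketch: call this Space's edit endpoint via gradio_client.
# Assumptions: "Nymbo/FLUX.1-Kontext-Dev" is a placeholder Space id, and the
# gr.on(fn=infer, ...) event is exposed under the default api_name "/infer".
from gradio_client import Client, handle_file

client = Client("Nymbo/FLUX.1-Kontext-Dev")  # placeholder Space id

# Inputs mirror the Gradio components: image, prompt, seed, randomize_seed,
# guidance_scale, steps. Outputs are (result image, seed, reuse-button update).
result_image, used_seed, _button = client.predict(
    input_image=handle_file("flowers.png"),  # local path or URL to an image
    prompt="turn the flowers into sunflowers",
    seed=42,
    randomize_seed=False,
    guidance_scale=2.5,
    steps=20,  # the new default; fewer steps keeps a run under ~30 seconds
    api_name="/infer",
)
print(result_image, used_seed)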