Nymbo committed
Commit 99fcae3 · verified · 1 Parent(s): c327e2c

Update app.py

Files changed (1): app.py +33 -20
app.py CHANGED
@@ -5,13 +5,24 @@
 
 import gradio as gr
 import numpy as np
-import spaces
 import torch
 import random
 from PIL import Image
 
 from diffusers import FluxKontextPipeline
-from diffusers.utils import load_image
+
+try:
+    import spaces  # Hugging Face Spaces runtime (GPU decorators)
+    GPU_DECORATOR = spaces.GPU
+except Exception:
+    # Fallback: no-op decorator when not running on Spaces
+    class _NoOpGPU:
+        def __call__(self, *args, **kwargs):
+            def _wrap(fn):
+                return fn
+            return _wrap
+
+    GPU_DECORATOR = _NoOpGPU()
 
 # -----------------------------
 # Constants & model bootstrap
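
A caveat with the fallback above: _NoOpGPU.__call__ always returns the inner _wrap, so off-Spaces the bare form @GPU_DECORATOR (used on infer and text_to_image below) rebinds the decorated function to _wrap itself, and the app would fail on the first call. Only the parameterized form @GPU_DECORATOR(duration=25) round-trips correctly. A minimal sketch of a no-op that handles both call styles, mirroring how spaces.GPU accepts either; a suggested drop-in for the except branch, not what the commit ships:

def GPU_DECORATOR(*args, **kwargs):
    # Bare use: @GPU_DECORATOR hands us the function itself -> return it unchanged.
    if len(args) == 1 and callable(args[0]) and not kwargs:
        return args[0]
    # Parameterized use: @GPU_DECORATOR(duration=25) -> return a pass-through wrapper.
    def _wrap(fn):
        return fn
    return _wrap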
@@ -22,16 +33,18 @@ MAX_SEED = np.iinfo(np.int32).max  # <-- (layman's) the biggest safe random seed
 
 # Load the FLUX.1 Kontext-dev pipeline once and keep it on GPU for speed
 # (layman's) this downloads the model and prepares it to run on your graphics card
+_device = "cuda" if torch.cuda.is_available() else "cpu"
+_dtype = torch.bfloat16 if _device == "cuda" else torch.float32
 pipe = FluxKontextPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-Kontext-dev",
-    torch_dtype=torch.bfloat16
-).to("cuda")
+    torch_dtype=_dtype,
+).to(_device)
 
 # ---------------------------------------------------------
 # Core editing function (works WITH or WITHOUT input image)
 # ---------------------------------------------------------
 
-@spaces.GPU
+@GPU_DECORATOR
 def infer(
     input_image: Image.Image | None,
     prompt: str,
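
For context, the editing path this pipeline serves reduces to a single call. A minimal sketch assuming the bootstrap above has run; the guidance_scale of 2.5 follows the diffusers example for Kontext-dev, and the file names are illustrative:

source = Image.open("input.png").convert("RGB")  # illustrative input file
edited = pipe(
    image=source,
    prompt="Add a red hat",
    guidance_scale=2.5,  # value shown in the diffusers Kontext-dev example
).images[0]
edited.save("edited.png")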
@@ -90,7 +103,7 @@ def infer(
 # NEW: Dedicated text-to-image function (separate MCP tool)
 # ------------------------------------------------------------
 
-@spaces.GPU  # (layman's) make sure we run on the GPU so it's fast
+@GPU_DECORATOR  # (layman's) make sure we run on the GPU so it's fast
 def text_to_image(
     prompt: str,
     seed: int = 42,
@@ -140,7 +153,7 @@ def text_to_image(
 # Lightweight helper for the Examples
 # -------------------------------------
 
-@spaces.GPU(duration=25)
+@GPU_DECORATOR(duration=25)
 def infer_example(input_image: Image.Image | None, prompt: str) -> tuple[Image.Image, int]:
     # (layman's) small wrapper used by the clickable examples
     image, seed, _ = infer(input_image, prompt)
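
The bodies of infer and text_to_image fall outside this diff, but the usual diffusers pattern behind a seed parameter is a seeded torch.Generator, with MAX_SEED (defined above) as the cap when randomizing. A hedged sketch; the helper name is illustrative, not from the commit:

def _seeded_generator(seed: int, randomize_seed: bool = False) -> tuple[torch.Generator, int]:
    # Optionally draw a fresh seed, then build a generator on the active device.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return torch.Generator(device=_device).manual_seed(seed), seed

# Usage: generator, seed = _seeded_generator(42, randomize_seed=True)
#        image = pipe(prompt=prompt, generator=generator).images[0]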
@@ -246,18 +259,18 @@ Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro
     # (Optional) If you want a 1-click "reuse image" flow in the UI later:
     # reuse_button.click(fn=lambda image: image, inputs=[result], outputs=[input_image])
 
-# ------------------------------------------------------------------
-# NEW: Register a dedicated MCP tool that does text-to-image only.
-# This does not create any extra UI; it's a clean API endpoint.
-# ------------------------------------------------------------------
-gr.api(
-    text_to_image,
-    api_name="text_to_image",  # <-- MCP tool route
-    api_description=(
-        "Generate a brand-new image from text (no input image required) "
-        "using FLUX.1 Kontext-dev. Returns the image and the seed used."
-    ),
-)
+    # ------------------------------------------------------------------
+    # Register a dedicated MCP tool that does text-to-image only (no UI).
+    # Placing gr.api inside the Blocks context registers it with this demo.
+    # ------------------------------------------------------------------
+    gr.api(
+        text_to_image,
+        api_name="text_to_image",  # MCP tool route
+        api_description=(
+            "Generate a brand-new image from text (no input image required) "
+            "using FLUX.1 Kontext-dev. Returns the image and the seed used."
+        ),
+    )
 
 # (layman's) start the app with MCP enabled so tools show up to agents (e.g., Claude/Cursor)
-demo.launch(mcp_server=True)
+demo.launch(mcp_server=True)
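
Once registered via gr.api, the route is callable over Gradio's regular HTTP API as well as MCP. A quick smoke test with gradio_client; the URL assumes a default local launch, and the non-prompt parameters fall back to the function's defaults:

from gradio_client import Client

client = Client("http://localhost:7860")  # assumption: default local launch address
result = client.predict(
    prompt="a lighthouse at dusk, watercolor",
    api_name="/text_to_image",
)
print(result)  # the image (as a filepath) plus the seed used, per the function's return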
 
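On the agent side, mcp_server=True is what surfaces the registered routes as MCP tools. A short note, assuming Gradio 5.x with the MCP extra installed (pip install "gradio[mcp]"); the endpoint path below is the one Gradio's MCP guide documents for a default launch:

# After demo.launch(mcp_server=True), the tool schema is served over SSE at
#   http://localhost:7860/gradio_api/mcp/sse
# and that URL is what an MCP-capable agent (e.g., Claude or Cursor) is pointed at.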