Spaces: Runtime error
Update app.py

app.py CHANGED
@@ -5,24 +5,13 @@
 
 import gradio as gr
 import numpy as np
+import spaces
 import torch
 import random
 from PIL import Image
 
 from diffusers import FluxKontextPipeline
-
-try:
-    import spaces  # Hugging Face Spaces runtime (GPU decorators)
-    GPU_DECORATOR = spaces.GPU
-except Exception:
-    # Fallback: no-op decorator when not running on Spaces
-    class _NoOpGPU:
-        def __call__(self, *args, **kwargs):
-            def _wrap(fn):
-                return fn
-            return _wrap
-
-    GPU_DECORATOR = _NoOpGPU()
+from diffusers.utils import load_image
 
 # -----------------------------
 # Constants & model bootstrap
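Note: the try/except block removed above was what kept app.py importable on machines without the `spaces` package. If that portability is wanted again later, here is a minimal sketch (not part of this commit) that handles both the bare `@GPU_DECORATOR` and the called `@GPU_DECORATOR(duration=...)` forms:

try:
    import spaces  # Hugging Face Spaces runtime (GPU decorators)
    GPU_DECORATOR = spaces.GPU
except ImportError:
    def GPU_DECORATOR(*args, **kwargs):
        # Bare form: @GPU_DECORATOR applied directly to a function
        if len(args) == 1 and callable(args[0]) and not kwargs:
            return args[0]
        # Called form: @GPU_DECORATOR(duration=25) returns a wrapper
        def _wrap(fn):
            return fn
        return _wrap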
@@ -33,18 +22,16 @@ MAX_SEED = np.iinfo(np.int32).max  # <-- (layman's) the biggest safe random seed
 
 # Load the FLUX.1 Kontext-dev pipeline once and keep it on GPU for speed
 # (layman's) this downloads the model and prepares it to run on your graphics card
-_device = "cuda" if torch.cuda.is_available() else "cpu"
-_dtype = torch.bfloat16 if _device == "cuda" else torch.float32
 pipe = FluxKontextPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-Kontext-dev",
-    torch_dtype=_dtype
-).to(_device)
+    torch_dtype=torch.bfloat16
+).to("cuda")
 
 # ---------------------------------------------------------
 # Core editing function (works WITH or WITHOUT input image)
 # ---------------------------------------------------------
 
-@GPU_DECORATOR
+@spaces.GPU
 def infer(
     input_image: Image.Image | None,
     prompt: str,
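Note: pinning torch_dtype=torch.bfloat16 and .to("cuda") is the right call on a GPU Space, but it raises at startup on CPU-only machines. The two deleted `_device`/`_dtype` lines were the portable variant; for local runs the load can be guarded the same way (a sketch restating the removed code, not part of this commit):

_device = "cuda" if torch.cuda.is_available() else "cpu"
_dtype = torch.bfloat16 if _device == "cuda" else torch.float32
pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev",
    torch_dtype=_dtype
).to(_device)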
@@ -103,7 +90,7 @@ def infer(
 # NEW: Dedicated text-to-image function (separate MCP tool)
 # ------------------------------------------------------------
 
-@GPU_DECORATOR
+@spaces.GPU  # (layman's) make sure we run on the GPU so it's fast
 def text_to_image(
     prompt: str,
     seed: int = 42,
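Note: the body of text_to_image sits outside this hunk, so the seed plumbing is not visible here. Based on the `seed: int = 42` default, the `import random` at the top, and the "Randomize seed" checkbox wired up below, it presumably looks something like this sketch (an assumption, reusing app.py's `random`, `torch`, and `MAX_SEED`):

def resolve_seed(seed: int, randomize_seed: bool) -> tuple[torch.Generator, int]:
    # (sketch) honor the "Randomize seed" checkbox, then seed a generator;
    # the resolved seed is returned too so the UI can report "Seed Used"
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return torch.Generator(device="cuda").manual_seed(seed), seed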
@@ -153,7 +140,7 @@ def text_to_image(
 # Lightweight helper for the Examples
 # -------------------------------------
 
-@GPU_DECORATOR
+@spaces.GPU(duration=25)
 def infer_example(input_image: Image.Image | None, prompt: str) -> tuple[Image.Image, int]:
     # (layman's) small wrapper used by the clickable examples
     image, seed, _ = infer(input_image, prompt)
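Note on the decorator change: on ZeroGPU hardware, @spaces.GPU requests a GPU slot for each call, and duration sets the number of seconds requested; the examples helper asks for a short 25-second window while infer and text_to_image keep the default. For reference, the two accepted forms (sketch, not part of this commit):

@spaces.GPU                  # default GPU-time budget per call
def long_job(): ...

@spaces.GPU(duration=25)     # request a shorter, roughly 25-second slot
def quick_job(): ...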
@@ -259,18 +246,38 @@ Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro
 # (Optional) If you want a 1-click "reuse image" flow in the UI later:
 # reuse_button.click(fn=lambda image: image, inputs=[result], outputs=[input_image])
 
-
-
-
-
-
-
-
-
-
-
-    ),
-
+# ------------------------------------------------------------------
+# NEW: Create a dedicated Interface for text-to-image MCP tool
+# This ensures better compatibility with MCP clients
+# ------------------------------------------------------------------
+
+# Create a separate interface for the text-to-image tool
+text_to_image_interface = gr.Interface(
+    fn=text_to_image,
+    inputs=[
+        gr.Text(label="Prompt", placeholder="Describe the image you want to generate", value=""),
+        gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=42, step=1),
+        gr.Checkbox(label="Randomize seed", value=True),
+        gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, value=2.5, step=0.1),
+        gr.Slider(label="Steps", minimum=1, maximum=30, value=20, step=1),
+        gr.Slider(label="Width", minimum=256, maximum=2048, value=1024, step=64),
+        gr.Slider(label="Height", minimum=256, maximum=2048, value=1024, step=64),
+    ],
+    outputs=[
+        gr.Image(label="Generated Image"),
+        gr.Number(label="Seed Used")
+    ],
+    title="FLUX.1 Text-to-Image Generator",
+    description="Generate high-quality images from text descriptions using FLUX.1 Kontext-dev",
+    api_name="text_to_image"
+)
+
+# Mount both interfaces using gr.TabbedInterface for better organization
+combined_demo = gr.TabbedInterface(
+    [demo, text_to_image_interface],
+    ["Image Editor", "Text-to-Image Generator"],
+    title="FLUX.1 Kontext Tools"
+)
 
 # (layman's) start the app with MCP enabled so tools show up to agents (e.g., Claude/Cursor)
-
+combined_demo.launch(mcp_server=True)
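Note: with mcp_server=True each exposed function doubles as an MCP tool, and the plain HTTP API stays available, so the new text-to-image endpoint can be smoke-tested with gradio_client. A hedged sketch (the Space id is hypothetical; the positional inputs follow the Interface's component order, and api_name matches the value set above):

from gradio_client import Client

client = Client("user/flux-kontext-tools")  # hypothetical <user>/<space> id

image_path, used_seed = client.predict(
    "a watercolor fox in a snowy forest",  # Prompt
    42,      # Seed
    True,    # Randomize seed
    2.5,     # Guidance Scale
    20,      # Steps
    1024,    # Width
    1024,    # Height
    api_name="/text_to_image",
)

MCP clients (e.g., Claude/Cursor, per the comment above) typically connect to Gradio's MCP endpoint at <space-url>/gradio_api/mcp/sse.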