Update app.py

app.py CHANGED

@@ -1,5 +1,4 @@
 import gradio as gr
-from gradio_image_slider import ImageSlider
 import numpy as np
 import random
 import torch
@@ -30,11 +29,11 @@ pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509",
     torch_dtype=dtype,
     device_map='cuda'),torch_dtype=dtype).to(device)
 
-pipe.load_lora_weights("
-                       weight_name="
-                       adapter_name="
-pipe.set_adapters(["
-pipe.fuse_lora(adapter_names=["
+pipe.load_lora_weights("autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
+                       weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
+                       adapter_name="anime")
+pipe.set_adapters(["anime"], adapter_weights=[1.])
+pipe.fuse_lora(adapter_names=["anime"], lora_scale=1.0)
 pipe.unload_lora_weights()
 
 pipe.transformer.__class__ = QwenImageTransformer2DModel
@@ -45,7 +44,7 @@ optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB",
 MAX_SEED = np.iinfo(np.int32).max
 
 @spaces.GPU
-def upscale_image(
+def convert_to_anime(
     image,
     seed,
     randomize_seed,
@@ -55,7 +54,7 @@ def upscale_image(
     width,
     progress=gr.Progress(track_tqdm=True)
 ):
-    prompt = "
+    prompt = "Convert this photo to anime style"
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
@@ -82,7 +81,7 @@ def upscale_image(
         num_images_per_prompt=1,
     ).images[0]
 
-    return
+    return result, seed
 
 
 # --- UI ---
@@ -154,10 +153,10 @@ def update_dimensions_on_upload(image):
 
 with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
     with gr.Column(elem_id="col-container"):
-        gr.Markdown("#
+        gr.Markdown("# 🎨 Photo to Anime", elem_id="title")
         gr.Markdown(
             """
-
+            Transform your photos into beautiful anime-style images ✨
             <br>
             <div style='text-align: center; margin-top: 1rem;'>
                 <a href='https://huggingface.co/spaces/akhaliq/anycoder' target='_blank' style='color: #0071e3; text-decoration: none; font-weight: 500;'>Built with anycoder</a>
@@ -166,28 +165,30 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
             elem_id="description"
         )
 
-        with gr.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        with gr.Row():
+            with gr.Column(scale=1):
+                image = gr.Image(
+                    label="Upload Photo",
+                    type="pil",
+                    elem_classes="image-container"
+                )
+
+                with gr.Accordion("⚙️ Advanced Settings", open=False):
+                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                    randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
+                    true_guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
+                    num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=40, step=1, value=4)
+                    height = gr.Slider(label="Height", minimum=256, maximum=2048, step=8, value=1024, visible=False)
+                    width = gr.Slider(label="Width", minimum=256, maximum=2048, step=8, value=1024, visible=False)
+
+                convert_btn = gr.Button("Convert to Anime", variant="primary", elem_id="convert-btn", size="lg")
+
+            with gr.Column(scale=1):
+                result = gr.Image(
+                    label="Anime Result",
+                    interactive=False,
+                    elem_classes="image-container"
+                )
 
         inputs = [
            image, seed, randomize_seed, true_guidance_scale,
@@ -195,9 +196,9 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
         ]
         outputs = [result, seed]
 
-        #
-
-            fn=
+        # Convert button click
+        convert_btn.click(
+            fn=convert_to_anime,
             inputs=inputs,
             outputs=outputs
         )
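
For reference, the LoRA handling introduced in the diff above follows diffusers' standard load → activate → fuse → unload sequence. A minimal standalone sketch, assuming a recent diffusers build that ships QwenImageEditPlusPipeline and has peft installed; the repository and weight-file names come from the diff, while the dtype and device are illustrative assumptions:

import torch
from diffusers import QwenImageEditPlusPipeline

# Base image-editing pipeline (bfloat16 on CUDA here is an assumption,
# not taken verbatim from app.py).
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509", torch_dtype=torch.bfloat16
).to("cuda")

# Load the anime LoRA, make it the only active adapter, bake it into the
# base weights, then drop the separate adapter modules so later inference
# carries no LoRA overhead.
pipe.load_lora_weights(
    "autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
    weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
    adapter_name="anime",
)
pipe.set_adapters(["anime"], adapter_weights=[1.0])
pipe.fuse_lora(adapter_names=["anime"], lora_scale=1.0)
pipe.unload_lora_weights()  # the fused weights remain after unloading

Fusing and then unloading keeps generation speed identical to the base model, which matters because the surrounding code (unchanged in this commit) reassigns pipe.transformer.__class__ and runs optimize_pipeline_ on the result.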
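
The @spaces.GPU decorator that sits above convert_to_anime (untouched by this commit) is what lets the Space run on ZeroGPU: a GPU is attached only while the decorated function executes. A minimal sketch of that pattern, unrelated to this app's logic (the function name and body are placeholders):

import spaces  # Hugging Face ZeroGPU helper package
import torch

@spaces.GPU  # request a GPU for the duration of each call
def cuda_smoke_test() -> str:
    # CUDA is only guaranteed to be available inside the decorated function.
    return torch.cuda.get_device_name(0)

A duration can also be requested, e.g. @spaces.GPU(duration=120), when a single call needs a longer slot than the default.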
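
The seed plumbing visible in the hunks (MAX_SEED, the randomize_seed branch, and the seed returned next to the image) is the usual Gradio-plus-diffusers pattern. A small sketch; the pipeline call itself is only hinted at in a comment because it is not part of the visible diff:

import random

import numpy as np
import torch

MAX_SEED = np.iinfo(np.int32).max  # 2**31 - 1, the same bound app.py uses


def resolve_seed(seed: int, randomize_seed: bool) -> int:
    # Mirror of the app's branch: draw a fresh 32-bit seed when the
    # "Randomize Seed" checkbox is ticked, otherwise keep the given value.
    return random.randint(0, MAX_SEED) if randomize_seed else seed


seed = resolve_seed(0, randomize_seed=True)
generator = torch.Generator(device="cuda").manual_seed(seed)
# A diffusers pipeline call would typically receive this generator, e.g.
#   result = pipe(image=[photo], prompt=prompt, generator=generator, ...).images[0]
# and the resolved seed is returned alongside the image so the UI can display it.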
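
Finally, convert_btn.click(fn=convert_to_anime, inputs=inputs, outputs=outputs) is Gradio's standard event wiring: the callback receives the current values of inputs in order, and its return values are written back to outputs. A self-contained toy Blocks app with the same shape (the echo callback and labels are placeholders, not part of the Space):

import gradio as gr


def echo(image, seed):
    # Placeholder callback: a real app would run the model here and return
    # values matching the outputs list below, in order.
    return image, seed


with gr.Blocks() as demo:
    image = gr.Image(label="Input", type="pil")
    seed = gr.Slider(label="Seed", minimum=0, maximum=100, step=1, value=0)
    result = gr.Image(label="Output", interactive=False)
    run_btn = gr.Button("Run", variant="primary")

    # Same wiring shape as convert_btn.click in app.py:
    # click -> fn(*inputs) -> outputs.
    run_btn.click(fn=echo, inputs=[image, seed], outputs=[result, seed])

if __name__ == "__main__":
    demo.launch()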