Update app.py
app.py CHANGED

@@ -18,8 +18,8 @@ import aoti
 # =========================================================
 # MODEL CONFIGURATION
 # =========================================================
-MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
-HF_TOKEN = os.environ.get("HF_TOKEN")
+MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
+HF_TOKEN = os.environ.get("HF_TOKEN")

 MAX_DIM = 832
 MIN_DIM = 480
@@ -38,6 +38,7 @@ MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)
 # =========================================================
 # LOAD PIPELINE
 # =========================================================
+print("Loading pipeline...")
 pipe = WanImageToVideoPipeline.from_pretrained(
     MODEL_ID,
     transformer=WanTransformer3DModel.from_pretrained(
@@ -60,6 +61,7 @@ pipe = WanImageToVideoPipeline.from_pretrained(
 # =========================================================
 # LOAD LORA ADAPTERS
 # =========================================================
+print("Loading LoRA adapters...")
 pipe.load_lora_weights(
     "Kijai/WanVideo_comfy",
     weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
@@ -80,10 +82,12 @@ pipe.unload_lora_weights()
 # =========================================================
 # QUANTIZATION & AOT OPTIMIZATION
 # =========================================================
+print("Applying quantization...")
 quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
 quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
 quantize_(pipe.transformer_2, Float8DynamicActivationFloat8WeightConfig())

+print("Loading AOTI blocks...")
 aoti.aoti_blocks_load(pipe.transformer, 'zerogpu-aoti/Wan2', variant='fp8da')
 aoti.aoti_blocks_load(pipe.transformer_2, 'zerogpu-aoti/Wan2', variant='fp8da')

@@ -196,7 +200,26 @@ def generate_video(
 # =========================================================
 # GRADIO UI
 # =========================================================
-with gr.Blocks() as demo:
+with gr.Blocks(theme=gr.themes.Soft()) as demo:
+
+    # --- ADVERTISEMENT BANNER FOR DREAM HUB PRO ---
+    gr.HTML("""
+    <div style="background: linear-gradient(90deg, #4f46e5, #9333ea); color: white; padding: 15px; border-radius: 10px; text-align: center; margin-bottom: 20px; box-shadow: 0 4px 15px rgba(0,0,0,0.1);">
+        <div style="display: flex; align-items: center; justify-content: center; gap: 20px; flex-wrap: wrap;">
+            <div style="text-align: left;">
+                <h3 style="margin: 0; font-weight: bold; font-size: 18px;">✨ New: Dream Hub Pro (All-in-One)</h3>
+                <p style="margin: 5px 0 0 0; opacity: 0.9; font-size: 14px;">Access all your pro tools (Wan2.1, Qwen, Audio, Video Enhance) in one place!</p>
+            </div>
+            <a href="https://huggingface.co/spaces/dream2589632147/Dream-Hub-Pro" target="_blank" style="text-decoration: none;">
+                <button style="background-color: white; color: #4f46e5; border: none; padding: 10px 25px; border-radius: 25px; font-weight: bold; cursor: pointer; transition: all 0.2s; font-size: 15px; box-shadow: 0 2px 5px rgba(0,0,0,0.2);">
+                    🚀 Open Hub Pro Now
+                </button>
+            </a>
+        </div>
+    </div>
+    """)
+    # ---------------------------------------------
+
     gr.Markdown("# 🚀 Dream Wan 2.2 Faster Pro (14B) — Ultra Fast I2V with Lightning LoRA")
     gr.Markdown("Optimized FP8 quantized pipeline with AoT blocks & 4-step fast inference ⚡")

@@ -246,4 +269,4 @@ with gr.Blocks() as demo:
     )

 if __name__ == "__main__":
-    demo.queue().launch(mcp_server=True)
+    demo.queue().launch(mcp_server=True)