x2XcarleX2x committed
Commit cd0db05 · verified · 1 Parent(s): d31ad86

Update app_wan.py

Files changed (1)
  1. app_wan.py +81 -254
app_wan.py CHANGED
@@ -1,268 +1,111 @@
- # app_wa
- import os
- # PyTorch 2.8 (temporary hack)
- os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')
 
- # --- 1. Model Download and Setup (Diffusers Backend) ---
 
- import torch
- from diffusers import FlowMatchEulerDiscreteScheduler
- from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
- from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
- from diffusers.utils.export_utils import export_to_video
  import gradio as gr
- import tempfile
- import numpy as np
  from PIL import Image
- import random
- import gc
- from gradio_client import Client, handle_file # Import for API call
 
- # --- Constants and Model Loading ---
  MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
-
- # --- NEW: Flexible Dimension Constants ---
  MAX_DIMENSION = 832
  MIN_DIMENSION = 480
  DIMENSION_MULTIPLE = 16
  SQUARE_SIZE = 480
-
  MAX_SEED = np.iinfo(np.int32).max
-
  FIXED_FPS = 16
  MIN_FRAMES_MODEL = 8
  MAX_FRAMES_MODEL = 81
 
- MIN_DURATION = round(MIN_FRAMES_MODEL/FIXED_FPS, 1)
- MAX_DURATION = round(MAX_FRAMES_MODEL/FIXED_FPS, 1)
-
- default_negative_prompt = "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走,过曝,"
-
- print("Loading models into memory. This may take a few minutes...")
-
- pipe = WanImageToVideoPipeline.from_pretrained(
-     MODEL_ID,
-     transformer=WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
-         subfolder='transformer',
-         torch_dtype=torch.bfloat16,
-         device_map='auto',
-     ),
-     transformer_2=WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
-         subfolder='transformer_2',
-         torch_dtype=torch.bfloat16,
-         device_map='auto',
-     ),
-     torch_dtype=torch.bfloat16,
- )
- pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config, shift=32.0)
-
-
-
-
-
-
-
- # ====================================================================================
- # Fusing the "Lightning" LoRA is ESSENTIAL for 8-step generation.
- # We bring that logic here while keeping the full optimization disabled.
- # ====================================================================================
-
- print("Applying 8-step Lightning LoRA...")
- try:
-     # Load the LoRA weights into both transformers
-     pipe.load_lora_weights(
-         "Kijai/WanVideo_comfy",
-         weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
-         adapter_name="lightx2v"
-     )
-     kwargs_lora = {"load_into_transformer_2": True}
-     pipe.load_lora_weights(
-         "Kijai/WanVideo_comfy",
-         weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
-         adapter_name="lightx2v_2", **kwargs_lora
-     )
-
-     # Define how the LoRA adapters will be combined
-     pipe.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1.0, 1.0])
-
-     # Fuse the LoRAs directly into the model weights to speed up inference.
-     print("Fusing LoRA weights into the main model...")
-     pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=3.0, components=["transformer"])
-     pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1.0, components=["transformer_2"])
-
-     # Unload the LoRA weights from memory, since they have already been merged in.
-     pipe.unload_lora_weights()
-
-     print("Lightning LoRA successfully fused. Model is ready for fast 8-step generation.")
-
- except Exception as e:
-     print(f"WARNING: Failed to load or fuse the LoRA. Generation may be slow or low quality. Error: {e}")
-
- print("All models loaded. Gradio app is ready.")
-
 
- # --- 2. Image Processing and Application Logic ---
- def generate_end_frame(start_img, gen_prompt, progress=gr.Progress(track_tqdm=True)):
-     """Calls an external Gradio API to generate an image."""
-     if start_img is None:
-         raise gr.Error("Please provide a Start Frame first.")
 
-     hf_token = os.getenv("HF_TOKEN")
-     if not hf_token:
-         raise gr.Error("HF_TOKEN not found in environment variables. Please set it in your Space secrets.")
-
-     with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmpfile:
-         start_img.save(tmpfile.name)
-         tmp_path = tmpfile.name
-
-     progress(0.1, desc="Connecting to image generation API...")
-     client = Client("multimodalart/nano-banana")
-
-     progress(0.5, desc=f"Generating with prompt: '{gen_prompt}'...")
-     try:
-         result = client.predict(
-             prompt=gen_prompt,
-             images=[
-                 {"image": handle_file(tmp_path)}
-             ],
-             manual_token=hf_token,
-             api_name="/unified_image_generator"
-         )
-     finally:
-         os.remove(tmp_path)
-
-     progress(1.0, desc="Done!")
-     print(result)
-     return result
-
- def switch_to_upload_tab():
-     """Returns a gr.Tabs update to switch to the first tab."""
-     return gr.Tabs(selected="upload_tab")
-
-
- def process_image_for_video(image: Image.Image) -> Image.Image:
-     """
-     Resizes an image based on the following rules for video generation.
-     """
-     width, height = image.size
-     if width == height:
-         return image.resize((SQUARE_SIZE, SQUARE_SIZE), Image.Resampling.LANCZOS)
-     aspect_ratio = width / height
-     new_width, new_height = width, height
-     if new_width > MAX_DIMENSION or new_height > MAX_DIMENSION:
-         if aspect_ratio > 1: scale = MAX_DIMENSION / new_width
-         else: scale = MAX_DIMENSION / new_height
-         new_width *= scale; new_height *= scale
-     if new_width < MIN_DIMENSION or new_height < MIN_DIMENSION:
-         if aspect_ratio > 1: scale = MIN_DIMENSION / new_height
-         else: scale = MIN_DIMENSION / new_width
-         new_width *= scale; new_height *= scale
-     final_width = int(round(new_width / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
-     final_height = int(round(new_height / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
-     final_width = max(final_width, MIN_DIMENSION if aspect_ratio < 1 else SQUARE_SIZE)
-     final_height = max(final_height, MIN_DIMENSION if aspect_ratio > 1 else SQUARE_SIZE)
-     return image.resize((final_width, final_height), Image.Resampling.LANCZOS)
-
- def resize_and_crop_to_match(target_image, reference_image):
-     """Resizes and center-crops the target image to match the reference image's dimensions."""
-     ref_width, ref_height = reference_image.size
-     target_width, target_height = target_image.size
-     scale = max(ref_width / target_width, ref_height / target_height)
-     new_width, new_height = int(target_width * scale), int(target_height * scale)
-     resized = target_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
-     left, top = (new_width - ref_width) // 2, (new_height - ref_height) // 2
-     return resized.crop((left, top, left + ref_width, top + ref_height))
-
-
- def generate_video(
      start_image_pil,
      end_image_pil,
      prompt,
      negative_prompt=default_negative_prompt,
      duration_seconds=2.1,
      steps=8,
-     guidance_scale=1,
-     guidance_scale_2=1,
      seed=42,
      randomize_seed=False,
      progress=gr.Progress(track_tqdm=True)
  ):
-     """
-     Generates a video by interpolating between a start and end image, guided by a text prompt.
-     """
-     if start_image_pil is None or end_image_pil is None:
-         raise gr.Error("Please upload both a start and an end image.")
-
-     progress(0.1, desc="Preprocessing images...")
-     processed_start_image = process_image_for_video(start_image_pil)
-     processed_end_image = resize_and_crop_to_match(end_image_pil, processed_start_image)
-     target_height, target_width = processed_start_image.height, processed_start_image.width
-
-     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
-     num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
-
-     progress(0.2, desc=f"Generating {num_frames} frames at {target_width}x{target_height} (seed: {current_seed})...")
-
-     # FINAL FIX: the generator is created on the CPU (the default) to avoid the 'meta' device error.
-     # The pipeline will take care of moving the latents to the GPU.
-     generator = torch.Generator().manual_seed(current_seed)
-
-     output_frames_list = pipe(
-         image=processed_start_image,
-         last_image=processed_end_image,
          prompt=prompt,
          negative_prompt=negative_prompt,
-         height=target_height,
-         width=target_width,
-         num_frames=num_frames,
-         guidance_scale=float(guidance_scale),
-         guidance_scale_2=float(guidance_scale_2),
-         num_inference_steps=int(steps),
-         generator=generator,
-     ).frames[0]
-
-     progress(0.9, desc="Encoding and saving video...")
-     with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
-         video_path = tmpfile.name
-     export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
-
-     progress(1.0, desc="Done!")
-     return video_path, current_seed
-
-
- # --- 3. Gradio User Interface ---
 
- css = '''
- .fillable{max-width: 1100px !important}
- .dark .progress-text {color: white}
- #general_items{margin-top: 2em}
- #group_all{overflow:visible}
- #group_all .styler{overflow:visible}
- #group_tabs .tabitem{padding: 0}
- .tab-wrapper{margin-top: -33px;z-index: 999;position: absolute;width: 100%;background-color: var(--block-background-fill);padding: 0;}
- #component-9-button{width: 50%;justify-content: center}
- #component-11-button{width: 50%;justify-content: center}
- #or_item{text-align: center; padding-top: 1em; padding-bottom: 1em; font-size: 1.1em;margin-left: .5em;margin-right: .5em;width: calc(100% - 1em)}
- #fivesec{margin-top: 5em;margin-left: .5em;margin-right: .5em;width: calc(100% - 1em)}
- '''
  with gr.Blocks(theme=gr.themes.Citrus(), css=css) as app:
      gr.Markdown("# Wan 2.2 Aduca-sdr")
-
      with gr.Row(elem_id="general_items"):
          with gr.Column():
              with gr.Group(elem_id="group_all"):
                  with gr.Row():
-                     start_image = gr.Image(type="pil", label="Start Frame", sources=["upload", "clipboard"])
                      with gr.Tabs(elem_id="group_tabs") as tabs:
                          with gr.TabItem("Upload", id="upload_tab"):
-                             end_image = gr.Image(type="pil", label="End Frame", sources=["upload", "clipboard"])
                          with gr.TabItem("Generate", id="generate_tab"):
                              generate_5seconds = gr.Button("Generate scene 5 seconds in the future", elem_id="fivesec")
-                             gr.Markdown("Generate a custom end-frame with an edit model like [Nano Banana](https://huggingface.co/spaces/multimodalart/nano-banana) or [Qwen Image Edit](https://huggingface.co/spaces/multimodalart/Qwen-Image-Edit-Fast)", elem_id="or_item")
                  prompt = gr.Textbox(label="Prompt", info="Describe the transition between the two images")
 
                  with gr.Accordion("Advanced Settings", open=False):
-                     duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=2.1, label="Video Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
                      negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                      steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=8, label="Inference Steps")
                      guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - high noise")
@@ -276,29 +119,35 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as app:
          with gr.Column():
              output_video = gr.Video(label="Generated Video", autoplay=True)
 
      ui_inputs = [
-         start_image, end_image, prompt, negative_prompt_input, duration_seconds_input,
-         steps_slider, guidance_scale_input, guidance_scale_2_input, seed_input,
-         randomize_seed_checkbox
      ]
      ui_outputs = [output_video, seed_input]
 
-     generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=ui_outputs)
 
      generate_5seconds.click(
-         fn=switch_to_upload_tab,
-         inputs=None,
-         outputs=[tabs]
      ).then(
-         fn=lambda img: generate_end_frame(img, "this image is a still frame from a movie. generate a new frame with what happens on this scene 5 seconds in the future"),
          inputs=[start_image],
          outputs=[end_image]
      ).success(
-         fn=generate_video,
          inputs=ui_inputs,
          outputs=ui_outputs
      )
 
      gr.Examples(
          examples=[
              ["poli_tower.png", "tower_takes_off.png", "the man turns around"],
@@ -307,28 +156,6 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as app:
          ],
          inputs=[start_image, end_image, prompt],
          outputs=ui_outputs,
-         fn=generate_video,
          cache_examples="lazy",
      )
-
-
-
-
- if __name__ == "__main__":
-     # Create the examples folder and sample images if they do not exist
-     if not os.path.exists("examples"):
-         os.makedirs("examples")
-     try:
-         Image.new('RGB', (832, 480), color = (73, 109, 137)).save("examples/frame_1.png")
-         Image.new('RGB', (832, 480), color = (173, 109, 237)).save("examples/frame_2.png")
-         Image.new('RGB', (832, 480), color = (255, 255, 0)).save("examples/frame_3.png")
-     except:
-         pass # Keeps the app from breaking if it lacks write permission
-
-     os.makedirs("deformes_workspace", exist_ok=True)
-     #logger.info("Gradio application ready. Launching interface...")
-     app.launch(
-         debug=True,
-         server_name="0.0.0.0",
-         server_port=int(os.getenv("PORT", "7860")),
-     )
+ # app_wan.py (essential excerpts)
 
+ import os
+ # (keep the install hack if needed)
+ #os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')
 
  import gradio as gr
  from PIL import Image
+ import numpy as np
+ import tempfile
 
+ # === keep the UI constants exactly as in the current file ===
  MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
  MAX_DIMENSION = 832
  MIN_DIMENSION = 480
  DIMENSION_MULTIPLE = 16
  SQUARE_SIZE = 480
  MAX_SEED = np.iinfo(np.int32).max
  FIXED_FPS = 16
  MIN_FRAMES_MODEL = 8
  MAX_FRAMES_MODEL = 81
+ MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
+ MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)
+ default_negative_prompt = "..." # same value as in the original file
 
+ # === new: import and instantiate the service ===
+ from aduc_framework.managers.wan_manager import WanManager
+ wan_manager = WanManager()
 
+ # (optional) keep generate_end_frame and switch_to_upload_tab as they are
+ # def generate_end_frame(...): ... # unchanged
+ # def switch_to_upload_tab(): ... # unchanged
 
+ # Thin wrapper: the UI builds images_condition_items and delegates to the service
+ def ui_generate_video(
      start_image_pil,
+     start_frame_text,
+     start_peso,
      end_image_pil,
+     end_frame_text,
+     end_peso,
      prompt,
      negative_prompt=default_negative_prompt,
      duration_seconds=2.1,
      steps=8,
+     guidance_scale=1.0,
+     guidance_scale_2=1.0,
      seed=42,
      randomize_seed=False,
      progress=gr.Progress(track_tqdm=True)
  ):
+     # Basic defensive conversion (the UI stays "dumb", no extra semantics)
+     def to_int_safe(v, default=0):
+         try:
+             return int(v)
+         except (TypeError, ValueError):
+             return default
+     def to_float_safe(v, default=1.0):
+         try:
+             return float(v)
+         except (TypeError, ValueError):
+             return default
+
+     images_condition_items = [
+         [start_image_pil, to_int_safe(start_frame_text, 0), to_float_safe(start_peso, 1.0)],
+         [end_image_pil, to_int_safe(end_frame_text, 0), to_float_safe(end_peso, 1.0)],
+     ]
+     return wan_manager.generate_video_from_conditions(
+         images_condition_items=images_condition_items,
          prompt=prompt,
          negative_prompt=negative_prompt,
+         duration_seconds=duration_seconds,
+         steps=steps,
+         guidance_scale=guidance_scale,
+         guidance_scale_2=guidance_scale_2,
+         seed=seed,
+         randomize_seed=randomize_seed,
+     )
 
+ # === Gradio UI (add frame/weight fields below each image) ===
+ css = ''' ... (unchanged) ... '''
  with gr.Blocks(theme=gr.themes.Citrus(), css=css) as app:
      gr.Markdown("# Wan 2.2 Aduca-sdr")
+
      with gr.Row(elem_id="general_items"):
          with gr.Column():
              with gr.Group(elem_id="group_all"):
                  with gr.Row():
+                     with gr.Column():
+                         start_image = gr.Image(type="pil", label="Start Frame", sources=["upload", "clipboard"])
+                         start_frame_tb = gr.Textbox(label="Start Frame (int)", value="0")
+                         start_peso_sl = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=1.0, label="Start Peso")
                      with gr.Tabs(elem_id="group_tabs") as tabs:
                          with gr.TabItem("Upload", id="upload_tab"):
+                             with gr.Column():
+                                 end_image = gr.Image(type="pil", label="End Frame", sources=["upload", "clipboard"])
+                                 end_frame_tb = gr.Textbox(label="End Frame (int)", value="80")
+                                 end_peso_sl = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=1.0, label="End Peso")
                          with gr.TabItem("Generate", id="generate_tab"):
                              generate_5seconds = gr.Button("Generate scene 5 seconds in the future", elem_id="fivesec")
+                             gr.Markdown(
+                                 "Generate a custom end-frame with an edit model like Nano Banana or Qwen Image Edit",
+                                 elem_id="or_item"
+                             )
                  prompt = gr.Textbox(label="Prompt", info="Describe the transition between the two images")
 
                  with gr.Accordion("Advanced Settings", open=False):
+                     duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=2.1, label="Video Duration (seconds)")
                      negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                      steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=8, label="Inference Steps")
                      guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - high noise")
          with gr.Column():
              output_video = gr.Video(label="Generated Video", autoplay=True)
 
+     # Inputs/outputs updated for the wrapper
      ui_inputs = [
+         start_image, start_frame_tb, start_peso_sl,
+         end_image, end_frame_tb, end_peso_sl,
+         prompt, negative_prompt_input, duration_seconds_input,
+         steps_slider, guidance_scale_input, guidance_scale_2_input,
+         seed_input, randomize_seed_checkbox
      ]
      ui_outputs = [output_video, seed_input]
 
+     generate_button.click(fn=ui_generate_video, inputs=ui_inputs, outputs=ui_outputs)
 
+     # "5 seconds" chain: keep the logic, only swap the final function to ui_generate_video
      generate_5seconds.click(
+         fn=switch_to_upload_tab, inputs=None, outputs=[tabs]
      ).then(
+         fn=lambda img: generate_end_frame(
+             img,
+             "this image is a still frame from a movie. generate a new frame with what happens on this scene 5 seconds in the future"
+         ),
          inputs=[start_image],
          outputs=[end_image]
      ).success(
+         fn=ui_generate_video,
          inputs=ui_inputs,
          outputs=ui_outputs
      )
 
+     # Examples stay; the frame/weight fields use their defaults
      gr.Examples(
          examples=[
              ["poli_tower.png", "tower_takes_off.png", "the man turns around"],
          ],
          inputs=[start_image, end_image, prompt],
          outputs=ui_outputs,
+         fn=ui_generate_video,
          cache_examples="lazy",
      )
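
Note on the new dependency: the updated app_wan.py imports WanManager from aduc_framework.managers.wan_manager, but that module is not part of this commit. The sketch below shows the interface the Gradio code now assumes. Only the module path, class name, method name, and the (video_path, seed) return shape come from the diff; the method body is a hypothetical reconstruction based on the removed pipeline-loading and generate_video() code, not the actual aduc_framework implementation.

# wan_manager_sketch.py — illustrative only, not part of this commit.
import random
import tempfile

import numpy as np
import torch
from diffusers import FlowMatchEulerDiscreteScheduler
from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
from diffusers.utils.export_utils import export_to_video

FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 81
MAX_SEED = np.iinfo(np.int32).max


class WanManager:
    """Owns the Wan 2.2 I2V pipeline; the Gradio layer only builds condition items."""

    def __init__(self):
        # Assumption: the same base checkpoint the removed code loaded; the bf16
        # transformer override and Lightning LoRA fusion could be reinstated here.
        self.pipe = WanImageToVideoPipeline.from_pretrained(
            "Wan-AI/Wan2.2-I2V-A14B-Diffusers", torch_dtype=torch.bfloat16
        )
        self.pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(
            self.pipe.scheduler.config, shift=32.0
        )

    def generate_video_from_conditions(
        self,
        images_condition_items,  # [[PIL.Image, frame_index, weight], ...]
        prompt,
        negative_prompt,
        duration_seconds,
        steps,
        guidance_scale,
        guidance_scale_2,
        seed,
        randomize_seed,
    ):
        # The Diffusers Wan I2V pipeline conditions on a first and a last frame,
        # so this sketch uses the first and last items and ignores the extra
        # frame-index/weight fields collected by the new UI.
        start_image = images_condition_items[0][0]
        end_image = images_condition_items[-1][0]

        current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
        num_frames = int(np.clip(round(duration_seconds * FIXED_FPS),
                                 MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))

        frames = self.pipe(
            image=start_image,
            last_image=end_image,
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=start_image.height,
            width=start_image.width,
            num_frames=num_frames,
            guidance_scale=float(guidance_scale),
            guidance_scale_2=float(guidance_scale_2),
            num_inference_steps=int(steps),
            generator=torch.Generator().manual_seed(current_seed),
        ).frames[0]

        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
            video_path = tmpfile.name
        export_to_video(frames, video_path, fps=FIXED_FPS)

        # Two return values so the Gradio outputs [output_video, seed_input] still bind.
        return video_path, current_seed

With an interface shaped like this, the UI layer stays a thin wrapper: it only converts widget values into images_condition_items and binds the two returned values back to [output_video, seed_input].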