alex committed on
Commit a1266db · 1 Parent(s): 0675e33

duration instead

Files changed (1)
  1. app.py +27 -25
app.py CHANGED
@@ -26,7 +26,7 @@ try:
     print("download sam")
     sam_dir = snapshot_download(repo_id="alexnasa/sam2")
 
-    @spaces.GPU(duration=450)
+    @spaces.GPU(duration=500)
     def install_sam():
         os.environ["TORCH_CUDA_ARCH_LIST"] = "9.0"
         sh(f"cd {sam_dir} && python setup.py build_ext --inplace && pip install -e .")
@@ -62,7 +62,7 @@ rc_mapping = {
 }
 
 
-def preprocess_video(input_video_path, session_id=None):
+def preprocess_video(input_video_path, duration, session_id=None):
 
     if session_id is None:
         session_id = uuid.uuid4().hex
@@ -72,7 +72,7 @@ def preprocess_video(input_video_path, session_id=None):
 
     process_video_path = os.path.join(output_dir, 'input_video.mp4')
 
-    convert_video_to_30fps_and_clip(input_video_path, process_video_path, crop_width=720, crop_height=1280)
+    convert_video_to_30fps_and_clip(input_video_path, process_video_path, duration_s=duration, crop_width=720, crop_height=1280)
 
     return process_video_path
 
@@ -207,9 +207,9 @@ def get_frames_count(video_file):
 
     return orig_frame_count
 
-def calculate_time_required(input_video, rc_bool):
+def calculate_time_required(max_duration_s, rc_bool):
 
-    frames_count = get_frames_count(input_video)
+    frames_count = 30 * max_duration_s
 
     chunks = frames_count // 77 + 1
 
@@ -218,32 +218,29 @@ def calculate_time_required(input_video, rc_bool):
         pose2d_tracking_duration_s = 75
         iteration_per_step_s = 13
     else:
-        pose2d_tracking_duration_s = 75
+        pose2d_tracking_duration_s = 50
         iteration_per_step_s = 12
 
     time_required = pose2d_tracking_duration_s + iteration_per_step_s * 10 * chunks
     print(f'for frames_count:{frames_count} doing {chunks} chunks the time_required is {time_required}')
     return time_required
 
-def update_time_required(input_video, rc_str):
+def update_time_required(max_duration_s, rc_str):
 
-    if input_video is None:
-        return gr.update(value="⌚ Zero GPU Required: --")
-
     rc_bool = rc_mapping[rc_str]
 
-    duration_s = calculate_time_required(input_video, rc_bool)
+    duration_s = calculate_time_required(max_duration_s, rc_bool)
     duration_m = duration_s / 60
 
     return gr.update(value=f"⌚ Zero GPU Required: ~{duration_s}.0s ({duration_m:.1f} mins)")
 
-def get_duration(input_video, edited_frame, rc_bool, session_id, progress):
+def get_duration(input_video, max_duration_s, edited_frame, rc_bool, session_id, progress):
 
-    return calculate_time_required(input_video, rc_bool)
+    return calculate_time_required(max_duration_s, rc_bool)
 
 
 @spaces.GPU(duration=get_duration)
-def _animate(input_video, edited_frame, rc_bool, session_id = None, progress=gr.Progress(track_tqdm=True),):
+def _animate(input_video, max_duration_s, edited_frame, rc_bool, session_id = None, progress=gr.Progress(track_tqdm=True),):
 
     if session_id is None:
         session_id = uuid.uuid4().hex
@@ -301,7 +298,7 @@ def _animate(input_video, edited_frame, rc_bool, session_id = None, progress=gr.
 
     return output_video_path
 
-def animate_scene(input_video, edited_frame, rc_str, session_id = None, progress=gr.Progress(track_tqdm=True),):
+def animate_scene(input_video, max_duration_s, edited_frame, rc_str, session_id = None, progress=gr.Progress(track_tqdm=True),):
 
     if not input_video:
         raise gr.Error("Please provide an video")
@@ -312,8 +309,11 @@ def animate_scene(input_video, edited_frame, rc_str, session_id = None, progress
     if session_id is None:
         session_id = uuid.uuid4().hex
 
+    input_video = preprocess_video(input_video, max_duration_s, session_id)
+
     rc_bool = rc_mapping[rc_str]
 
+
     output_dir = os.path.join(os.environ["PROCESSED_RESULTS"], session_id)
     os.makedirs(output_dir, exist_ok=True)
 
@@ -324,8 +324,8 @@ def animate_scene(input_video, edited_frame, rc_str, session_id = None, progress
     edited_frame_png = os.path.join(output_dir, 'edited_frame.png')
     edited_frame_img = Image.open(edited_frame)
     edited_frame_img.save(edited_frame_png)
-
-    output_video_path = _animate(input_video, edited_frame, rc_bool, session_id, progress)
+
+    output_video_path = _animate(input_video, max_duration_s, edited_frame_png, rc_bool, session_id, progress)
 
     final_video_path = os.path.join(output_dir, 'final_result.mp4')
 
@@ -345,6 +345,7 @@ def animate_scene(input_video, edited_frame, rc_str, session_id = None, progress
         combine_video_and_audio_ffmpeg(output_video_path, input_audio_path, final_video_path)
     else:
        final_video_path = output_video_path
+
     return final_video_path, pose_video, bg_video, mask_video, face_video
 
 css = """
@@ -480,7 +481,8 @@ with gr.Blocks(css=css, title="Wan 2.2 Animate --replace", theme=gr.themes.Ocean
                 </div>
             """)
             input_video = gr.Video(label="Input Video", height=512)
-            trim_button = gr.Button("Trim to 2s")
+            max_duration_slider = gr.Slider(2, 8, 2, step=2, label="Max Duration")
+
 
         with gr.Column(elem_id="step-column"):
             gr.HTML("""
@@ -520,32 +522,32 @@
 
     gr.Examples(
         examples=[
-
+
             [
                 "./examples/paul.mp4",
+                2,
                 "./examples/man.png",
                 "Video → Ref Image"
             ],
 
             [
                 "./examples/desi.mp4",
+                2,
                 "./examples/desi.png",
                 "Video ← Ref Image"
            ],
 
 
-
         ],
-        inputs=[input_video, edited_frame, replace_character_string],
+        inputs=[input_video, max_duration_slider, edited_frame, replace_character_string],
        outputs=[output_video, pose_video, bg_video, mask_video, face_video],
         fn=animate_scene,
         cache_examples=True,
     )
 
-    action_button.click(fn=animate_scene, inputs=[input_video, edited_frame, replace_character_string, session_state], outputs=[output_video, pose_video, bg_video, mask_video, face_video])
-    input_video.change(update_time_required, inputs=[input_video, replace_character_string], outputs=[time_required])
-    trim_button.click(preprocess_video, inputs=[input_video, session_state], outputs=[input_video])
-    replace_character_string.change(update_time_required, inputs=[input_video, replace_character_string], outputs=[time_required])
+    action_button.click(fn=animate_scene, inputs=[input_video, max_duration_slider, edited_frame, replace_character_string, session_state], outputs=[output_video, pose_video, bg_video, mask_video, face_video])
+    max_duration_slider.change(update_time_required, inputs=[max_duration_slider, replace_character_string], outputs=[time_required])
+    replace_character_string.change(update_time_required, inputs=[max_duration_slider, replace_character_string], outputs=[time_required])
 
 if __name__ == "__main__":
     demo.queue()
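
For reference, a minimal sketch of what the reworked GPU-time estimate computes. The constants come straight from the diff (30 fps after convert_video_to_30fps_and_clip, 77-frame chunks, 10 iterations per chunk, 75 s/13 s in one rc_bool branch and 50 s/12 s in the other); the function name and the sample calls below are illustrative, not the Space's exact code:

# Illustrative restatement of the new calculate_time_required (name is hypothetical).
def estimate_gpu_seconds(max_duration_s, rc_bool):
    frames_count = 30 * max_duration_s     # assumes the clip is re-encoded to 30 fps
    chunks = frames_count // 77 + 1        # the model consumes 77-frame chunks
    if rc_bool:                            # rc_bool True branch in the diff
        pose2d_tracking_duration_s = 75
        iteration_per_step_s = 13
    else:                                  # rc_bool False branch (was 75 s, now 50 s)
        pose2d_tracking_duration_s = 50
        iteration_per_step_s = 12
    # fixed 2D pose-tracking overhead plus 10 iterations per chunk
    return pose2d_tracking_duration_s + iteration_per_step_s * 10 * chunks

print(estimate_gpu_seconds(2, True))    # 60 frames -> 1 chunk  -> 75 + 13*10*1 = 205
print(estimate_gpu_seconds(8, False))   # 240 frames -> 4 chunks -> 50 + 12*10*4 = 530

Because the estimate now depends only on the Max Duration slider and the replace-character mode, the "input_video is None" guard in update_time_required can be dropped, and @spaces.GPU(duration=get_duration), which the matching signatures suggest is invoked with the same arguments as _animate, requests a ZeroGPU slot sized to the slider value rather than to the uploaded clip's frame count.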