yonigozlan (HF Staff) committed
Commit fee29b3 · 1 Parent(s): 519508e

test propagate with return only

Files changed (1): app.py (+14, -9)
app.py CHANGED
@@ -512,8 +512,8 @@ def on_image_click(
 @spaces.GPU()
 def propagate_masks(GLOBAL_STATE: gr.State):
     if GLOBAL_STATE is None or GLOBAL_STATE.inference_session is None:
-        yield "Load a video first.", gr.update()
-        return
+        # yield "Load a video first.", gr.update()
+        return GLOBAL_STATE, "Load a video first.", gr.update()
 
     processor = GLOBAL_STATE.processor
     model = GLOBAL_STATE.model
@@ -527,7 +527,7 @@ def propagate_masks(GLOBAL_STATE: gr.State):
     processed = 0
 
     # Initial status; no slider change yet
-    yield f"Propagating masks: {processed}/{total}", gr.update()
+    # yield f"Propagating masks: {processed}/{total}", gr.update()
 
     last_frame_idx = 0
     with torch.inference_mode():
@@ -550,10 +550,10 @@ def propagate_masks(GLOBAL_STATE: gr.State):
 
             processed += 1
             # Every 15th frame (or last), move slider to current frame to update preview via slider binding
-            if processed % 15 == 0 or processed == total:
-                yield f"Propagating masks: {processed}/{total}", gr.update(value=frame_idx)
-            else:
-                yield f"Propagating masks: {processed}/{total}", gr.update()
+            # if processed % 15 == 0 or processed == total:
+            #     yield f"Propagating masks: {processed}/{total}", gr.update(value=frame_idx)
+            # else:
+            #     yield f"Propagating masks: {processed}/{total}", gr.update()
 
     model.to("cpu")
     inference_session.inference_device = "cpu"
@@ -562,7 +562,12 @@ def propagate_masks(GLOBAL_STATE: gr.State):
         torch.cuda.empty_cache()
 
     # Final status; ensure slider points to last processed frame
-    yield (
+    # yield (
+    #     f"Propagated masks across {processed} frames for {len(inference_session.obj_ids)} objects.",
+    #     gr.update(value=last_frame_idx),
+    # )
+    return (
+        GLOBAL_STATE,
         f"Propagated masks across {processed} frames for {len(inference_session.obj_ids)} objects.",
         gr.update(value=last_frame_idx),
     )
@@ -826,7 +831,7 @@ with gr.Blocks(title="SAM2 Video (Transformers) - Interactive Segmentation", the
     propagate_btn.click(
         propagate_masks,
         inputs=[GLOBAL_STATE],
-        outputs=[propagate_status, frame_slider],
+        outputs=[GLOBAL_STATE, propagate_status, frame_slider],
     )
 
     reset_btn.click(
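
For context, a minimal, self-contained sketch of the Gradio pattern this commit switches to (not the app's real code; `run`, `status`, `slider`, and the toy state dict are placeholder names): a plain, non-generator event handler that returns a single tuple which Gradio maps positionally onto `outputs`, with the `gr.State` object included as the first output so the session state round-trips through the click.

import gradio as gr

def run(state):
    # Plain (non-generator) handler: Gradio calls it once and maps the
    # returned tuple onto `outputs` positionally: state, status text, slider update.
    state = state or {"frames": 0}
    state["frames"] += 1
    return state, f"Processed {state['frames']} frame(s).", gr.update(value=state["frames"])

with gr.Blocks() as demo:
    state = gr.State(value=None)
    status = gr.Textbox(label="Status")
    slider = gr.Slider(0, 100, step=1, label="Frame")
    btn = gr.Button("Run")
    # The outputs list must line up one-to-one with the returned tuple,
    # mirroring outputs=[GLOBAL_STATE, propagate_status, frame_slider] above.
    btn.click(run, inputs=[state], outputs=[state, status, slider])

if __name__ == "__main__":
    demo.launch()

Unlike the yield-based version commented out in this commit, a return-only handler updates the status text and slider once, after propagation finishes, rather than streaming per-frame progress.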