thankfulcarp committed
Commit 2b02e16 · 1 Parent(s): 14301a8
Files changed (1)
  1. app.py +10 -0
app.py CHANGED
@@ -386,6 +386,11 @@ def enhance_prompt_with_llm(prompt: str, enhancer_pipeline):
         traceback.print_exc()
         gr.Warning(f"An error occurred during prompt enhancement. See console for details.")
         return prompt
+    finally:
+        # Explicitly empty the CUDA cache to help release GPU memory.
+        # This can help resolve intermittent issues where the GPU remains active.
+        print("🧹 Clearing CUDA cache after prompt enhancement...")
+        torch.cuda.empty_cache()


 @spaces.GPU(duration_from_args=get_t2v_duration)
@@ -470,6 +475,11 @@ def generate_t2v_video(
         print("✅ Cleanup complete. Pipeline reset to base LoRA state.")
     except Exception as e:
         print(f"⚠️ Error during LoRA cleanup: {e}. State may be inconsistent.")
+
+    # Explicitly empty the CUDA cache to help release GPU memory.
+    # This can help resolve intermittent issues where the GPU remains active.
+    print("🧹 Clearing CUDA cache after video generation...")
+    torch.cuda.empty_cache()


 # --- 6. Gradio UI Layout ---
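
For reference, a minimal sketch of the pattern this commit applies: run the GPU-bound step, then release PyTorch's cached CUDA memory even if the step raises. The wrapper name run_gpu_task and the callable it accepts are illustrative, not part of app.py, and the torch.cuda.is_available() check is an added safeguard for CPU-only environments; only torch.cuda.empty_cache() comes from the commit itself.

import torch


def run_gpu_task(task_fn, *args, **kwargs):
    """Run a GPU-bound callable, then release PyTorch's cached CUDA memory."""
    try:
        return task_fn(*args, **kwargs)
    finally:
        # empty_cache() returns cached-but-unused blocks to the CUDA driver;
        # memory still referenced by live tensors is unaffected.
        if torch.cuda.is_available():
            print("🧹 Clearing CUDA cache...")
            torch.cuda.empty_cache()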