AiCoderv2 commited on
Commit
8bc71ca
·
verified ·
1 Parent(s): 8f7d78a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -0
app.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import tempfile

import gradio as gr
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video
# Map of human-readable model names (shown in the UI dropdown) to
# Hugging Face model ids passed to DiffusionPipeline.from_pretrained.
# NOTE(review): "model_name_or_id_1" / "model_name_or_id_2" are placeholders
# and will fail to load if selected — replace with real model ids.
models = {
    "AnimateDiff-Lightning": "ByteDance/AnimateDiff-Lightning",
    "Other Model 1": "model_name_or_id_1",
    "Other Model 2": "model_name_or_id_2",
    # Add more models as needed
}

# Cache of already-loaded pipelines keyed by model id, so each model is
# downloaded/initialized at most once per process (see load_model).
loaded_pipelines = {}
def load_model(model_id):
    """Return a cached DiffusionPipeline for *model_id*, loading it on first use.

    The pipeline is moved to the GPU when CUDA is available, otherwise it
    stays on the CPU. Subsequent calls for the same id reuse the cached
    pipeline from ``loaded_pipelines``.
    """
    pipeline = loaded_pipelines.get(model_id)
    if pipeline is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        pipeline = DiffusionPipeline.from_pretrained(model_id).to(device)
        loaded_pipelines[model_id] = pipeline
    return pipeline
def generate_video(model_name, prompt):
    """Generate a video for *prompt* using the model selected in the UI.

    Args:
        model_name: Key into the module-level ``models`` dict.
        prompt: Text prompt forwarded to the diffusion pipeline.

    Returns:
        Path to an ``.mp4`` file containing the generated video.

    Raises:
        ValueError: If the pipeline output exposes neither ``frames``
            nor ``videos``.
    """
    pipe = load_model(models[model_name])
    result = pipe(prompt)
    # Diffusers video pipelines (e.g. AnimateDiff) return their output via
    # ``result.frames``; some variants use ``result.videos``. Accept either
    # instead of assuming a ``videos`` attribute that may not exist.
    frames = getattr(result, "frames", None)
    if frames is None:
        frames = getattr(result, "videos", None)
    if frames is None:
        raise ValueError("Pipeline output exposes neither 'frames' nor 'videos'")
    # Write to a unique temp file: the previous fixed "output.mp4" path was
    # overwritten whenever two requests ran concurrently.
    fd, video_path = tempfile.mkstemp(suffix=".mp4")
    os.close(fd)
    # Pipeline outputs have no .save(); export the first batch item's frames
    # through the diffusers helper instead.
    export_to_video(frames[0], video_path)
    return video_path
# Gradio UI: pick a model, enter a prompt, click to generate a video.
with gr.Blocks() as demo:
    gr.Markdown("# Video Generation from Text")
    with gr.Row():
        model_dropdown = gr.Dropdown(choices=list(models.keys()), label="Select Model")
        prompt_box = gr.Textbox(label="Enter your prompt", lines=2, placeholder="A spaceship in space, neon colors")
    run_button = gr.Button("Generate Video")
    result_video = gr.Video(label="Generated Video")

    # Wire the button to the generator: (model name, prompt) -> video path.
    run_button.click(
        fn=generate_video,
        inputs=[model_dropdown, prompt_box],
        outputs=result_video,
    )

demo.launch()