Update app.py
app.py CHANGED
@@ -109,18 +109,20 @@ def generate_audio(prompt: str, audio_length: int):
 # ---------------------------------------------------------------------
 # Gradio Interface
 # ---------------------------------------------------------------------
-def
+def generate_script_interface(user_prompt, llama_model_id):
     # Load Llama 3 Pipeline with Zero GPU
     pipeline_llama = load_llama_pipeline_zero_gpu(llama_model_id, hf_token)
     if isinstance(pipeline_llama, str):
-        return pipeline_llama
+        return pipeline_llama
 
     # Generate Script
     script = generate_script(user_prompt, pipeline_llama)
+    return script
 
+def generate_audio_interface(script, audio_length):
     # Generate Audio
     audio_data = generate_audio(script, audio_length)
-    return
+    return audio_data
 
 # ---------------------------------------------------------------------
 # Interface
@@ -133,16 +135,27 @@ with gr.Blocks() as demo:
     llama_model_id = gr.Textbox(label="Llama 3 Model ID", value="meta-llama/Meta-Llama-3-70B")
     audio_length = gr.Slider(label="Audio Length (tokens)", minimum=128, maximum=1024, step=64, value=512)
 
-
-
-
+    with gr.Row():
+        generate_script_button = gr.Button("Generate Promo Script")
+        script_output = gr.Textbox(label="Generated Script", interactive=False)
+
+    with gr.Row():
+        generate_audio_button = gr.Button("Generate Audio")
+        audio_output = gr.Audio(label="Generated Audio", type="filepath")
 
-
-
-
+    generate_script_button.click(
+        generate_script_interface,
+        inputs=[user_prompt, llama_model_id],
+        outputs=script_output
+    )
+
+    generate_audio_button.click(
+        generate_audio_interface,
+        inputs=[script_output, audio_length],
+        outputs=audio_output
+    )
 
 # ---------------------------------------------------------------------
 # Launch App
 # ---------------------------------------------------------------------
 demo.launch(debug=True)
-
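The hunk above calls three helpers defined elsewhere in app.py (load_llama_pipeline_zero_gpu, generate_script, generate_audio) whose bodies are not part of this diff. As a rough sketch of what load_llama_pipeline_zero_gpu might look like, inferred only from how it is called here (it takes a model id and the hf_token, and returns either a pipeline object or an error string that the caller detects with isinstance(..., str)):

# Hypothetical sketch only -- the real definition lives earlier in app.py and is
# not shown in this hunk. Inferred from the call sites above: takes a model id
# and an HF token, returns a pipeline on success or an error string on failure.
import transformers

def load_llama_pipeline_zero_gpu(model_id, token):
    try:
        return transformers.pipeline(
            "text-generation",
            model=model_id,
            token=token,
            device_map="auto",  # assumption: device placement is handled automatically on the Space
        )
    except Exception as exc:
        # The caller checks isinstance(result, str), so failures come back as plain strings.
        return f"Failed to load {model_id}: {exc}"

Under that convention, generate_script_interface can surface the error string directly in the Generated Script textbox instead of raising inside the Gradio callback.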