Update app.py
app.py CHANGED

@@ -1,6 +1,9 @@
+!pip install gradio
+
+!pip install git+https://github.com/openai/whisper.git
 import gradio as gr
 import torch
 import whisper
 import warnings
 import os
 import librosa
@@ -89,4 +92,9 @@ with gr.Blocks() as demo:
     with gr.Row():
         transcript_output = gr.Textbox(label="Transcription", lines=3)
         emotion_output = gr.Label(label="Detected Emotion from Text")
+        prosody_output = gr.Label(label="Prosodic Features (Pitch, Loudness, Intensity)")
+
+    transcribe_audio_r.click(translate_and_classify, inputs=audio_input_r, outputs=[transcript_output, emotion_output, prosody_output])
+    transcribe_audio_u.click(translate_and_classify, inputs=audio_input_u, outputs=[transcript_output, emotion_output, prosody_output])
+
 demo.launch(share=True)
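The new click wiring routes three outputs from translate_and_classify into the Textbox and the two Label components, so the handler must return a matching 3-tuple. The real translate_and_classify is defined earlier in app.py and is not shown in this diff; the sketch below is only a hypothetical illustration of that contract, and the model size, emotion scores, and prosody extraction in it are assumptions. Note also that the added !pip install lines are IPython shell escapes: they work in a notebook cell but raise a SyntaxError if app.py is run as a plain Python script.

# Hypothetical sketch only: the real translate_and_classify is defined
# earlier in app.py and is not part of this diff.
import whisper
import librosa
import numpy as np

model = whisper.load_model("base")  # assumed model size

def translate_and_classify(audio_path):
    # Whisper transcription: transcribe() returns a dict with a "text" key.
    result = model.transcribe(audio_path)
    transcript = result["text"]

    # Placeholder emotion scores; the app presumably runs a real text
    # classifier here. gr.Label renders a {label: confidence} dict.
    emotion_scores = {"neutral": 0.6, "happy": 0.4}

    # Prosodic features with librosa: pitch track via YIN, loudness via RMS.
    y, sr = librosa.load(audio_path, sr=None)
    f0 = librosa.yin(y, fmin=50, fmax=500, sr=sr)
    rms = librosa.feature.rms(y=y)[0]
    prosody = {
        f"mean pitch: {np.nanmean(f0):.0f} Hz": 1.0,
        f"mean loudness (RMS): {rms.mean():.3f}": 1.0,
    }

    # One return value per component in
    # outputs=[transcript_output, emotion_output, prosody_output].
    return transcript, emotion_scores, prosody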