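"""Gradio walkthrough demo for streaming speech transcription/translation.

Four steps: pick or record an audio clip, stream it to the browser over WebRTC
(fastrtc), configure the transcription/translation task, then run and monitor it.
"""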
from app.logger_config import (
    logger as logging,
    DEBUG
)
import numpy as np
import gradio as gr
import asyncio
from fastrtc.webrtc import WebRTC
from fastrtc.utils import AdditionalOutputs
from pydub import AudioSegment
import time
import os
from gradio.utils import get_space
from app.utils import (
    generate_coturn_config,
    raise_function
)
from app.session_utils import (
    on_load,
    on_unload,
    get_active_sessions,
    register_session,
    reset_all_active_sessions,
)
from app.ui_utils import (
    SUPPORTED_LANGS_MAP,
    EXAMPLE_CONFIGS,
    apply_preset_if_example,
    reset_to_defaults,
    summarize_config,
    handle_additional_outputs,
    get_custom_theme,
    on_file_load
)
from app.stream_utils import (
    read_and_stream_audio,
    stop_streaming
)
# --------------------------------------------------------
# Initialization
# --------------------------------------------------------
reset_all_active_sessions()
theme, css_style = get_custom_theme()
with gr.Blocks(theme=theme, css=css_style) as demo:
    session_hash = gr.State()
    session_hash_box = gr.Textbox(label="Session ID", interactive=False, visible=DEBUG)

    with gr.Accordion("📊 Active Sessions", open=True, visible=DEBUG):
        sessions_table = gr.DataFrame(
            headers=["session_hash", "file", "start_time", "status"],
            interactive=False,
            wrap=True,
            max_height=200,
        )
    gr.Timer(3.0).tick(fn=get_active_sessions, outputs=sessions_table)

    demo.load(fn=on_load, inputs=None, outputs=[session_hash, session_hash_box])
    demo.unload(on_unload)
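    # Session lifecycle: on_load / on_unload come from app.session_utils. As a rough
    # sketch (the real implementations may differ), on_load would read the hash from the
    # Gradio request, register it, and fill both the State and the Textbox:
    #
    #     def on_load(request: gr.Request):
    #         register_session(request.session_hash)
    #         return request.session_hash, request.session_hash
    #
    # while on_unload would drop the session from the active-session registry.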
    stop_streaming_flags = gr.State(value={"stop": False})
    active_filepath = gr.State(value=next(iter(EXAMPLE_CONFIGS)))
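    # stop_streaming_flags is a shared mutable dict used as a cooperative cancel flag for
    # the streaming handler; active_filepath starts on the first bundled sample from
    # EXAMPLE_CONFIGS.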
    with gr.Walkthrough(selected=0) as walkthrough:
        # === STEP 1 ===
        with gr.Step("Audio", id=0) as audio_source_step:
            gr.Markdown(
                """
                ### Step 1: Upload or Record an Audio File
                You can upload an existing file or record directly from your microphone.
                Accepted formats: **.wav**, **.mp3**, **.flac**
                Maximum length recommended: **60 seconds**
                """
            )
            with gr.Group():
                with gr.Column():
                    main_audio = gr.Audio(
                        label="Audio Input",
                        sources=["upload", "microphone"],
                        type="filepath",
                        interactive=True
                    )
                    with gr.Accordion("Need a quick test? Try one of the sample audios below", open=True):
                        examples = gr.Examples(
                            examples=list(EXAMPLE_CONFIGS.keys()),
                            inputs=main_audio,
                            label=None,
                            examples_per_page=3
                        )
                        gr.Markdown(
                            """
                            🔹 **english_meeting.wav** – Short business meeting in English
                            🔹 **french_news.wav** – Excerpt from a French radio broadcast
                            🔹 **spanish_podcast.wav** – Segment from a Spanish-language podcast
                            """
                        )
            btn = gr.Button("Proceed to Streaming", visible=True)
            # ui_components_one = [active_filepath, btn]
            # main_audio.change(fn=on_file_load, inputs=[main_audio], outputs=ui_components_one)
            # main_audio.stop_recording(fn=on_file_load, inputs=[main_audio], outputs=ui_components_one)
            # main_audio.clear(fn=on_file_load, inputs=[main_audio], outputs=ui_components_one)
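            # If re-enabled, on_file_load (app.ui_utils) is expected to return an updated
            # active_filepath plus button state for the upload, recording and clear events above.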
            btn.click(lambda: gr.Walkthrough(selected=1), outputs=walkthrough)

        # === STEP 2 ===
        with gr.Step("Stream", id=1) as audio_stream:
            gr.Markdown("### Step 2: Start Audio Streaming")
            with gr.Group():
                with gr.Column():
                    webrtc_stream = WebRTC(
                        label="Live Stream",
                        mode="receive",
                        modality="audio",
                        rtc_configuration=generate_coturn_config(),
                        visible=True,
                    )
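                    # generate_coturn_config (app.utils) should return an RTCConfiguration-style
                    # dict, roughly {"iceServers": [{"urls": [...], "username": ..., "credential": ...}]};
                    # the exact fields depend on the TURN/STUN deployment.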
                    start_button = gr.Button("Start Streaming")
                    webrtc_stream.stream(
                        fn=read_and_stream_audio,
                        inputs=[active_filepath, session_hash, stop_streaming_flags],
                        outputs=[webrtc_stream],
                        trigger=start_button.click,
                        concurrency_id="audio_stream",
                        concurrency_limit=10,
                    )
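                    # Illustrative sketch only: the real read_and_stream_audio lives in
                    # app.stream_utils and may differ. fastrtc's "receive" audio contract is a
                    # generator yielding (sample_rate, np.ndarray) chunks, stopping early when
                    # the shared flag is set. This helper is never called; it documents that
                    # contract using the pydub / numpy imports above.
                    def _read_and_stream_audio_sketch(filepath, session_hash, flags, chunk_ms=200):
                        audio = AudioSegment.from_file(filepath).set_channels(1)  # mono for simplicity
                        for start_ms in range(0, len(audio), chunk_ms):
                            if flags.get("stop"):
                                break  # cooperative cancellation via stop_streaming_flags
                            chunk = audio[start_ms:start_ms + chunk_ms]
                            samples = np.array(chunk.get_array_of_samples())  # int16 for 16-bit sources
                            yield (chunk.frame_rate, samples.reshape(1, -1))
                            time.sleep(chunk_ms / 1000)  # pace chunks at roughly real time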
            go_to_config = gr.Button("Go to Configuration", visible=False)
            go_to_config.click(lambda: gr.Walkthrough(selected=2), outputs=walkthrough)

        # === STEP 3 ===
        with gr.Step("Configuration", id=2):
            gr.Markdown("### Step 3: Configure the Task")
            task_type = gr.Radio(["Transcription", "Translation"], value="Transcription", label="Task Type")
            lang_source = gr.Dropdown(list(SUPPORTED_LANGS_MAP.keys()), value="French", label="Source Language")
            lang_target = gr.Dropdown(list(SUPPORTED_LANGS_MAP.keys()), value="English", label="Target Language", visible=False)

            with gr.Accordion("Advanced Configuration", open=False):
                chunk_secs = gr.Number(value=1.0, label="chunk_secs", precision=1)
                left_context_secs = gr.Number(value=20.0, label="left_context_secs", precision=1)
                right_context_secs = gr.Number(value=0.5, label="right_context_secs", precision=1)
                streaming_policy = gr.Dropdown(["waitk", "alignatt"], value="waitk", label="decoding.streaming_policy")
                alignatt_thr = gr.Number(value=8, label="alignatt_thr", precision=0)
                waitk_lagging = gr.Number(value=2, label="waitk_lagging", precision=0)
                exclude_sink_frames = gr.Number(value=8, label="exclude_sink_frames", precision=0)
                xatt_scores_layer = gr.Number(value=-2, label="xatt_scores_layer", precision=0)
                hallucinations_detector = gr.Checkbox(value=True, label="hallucinations_detector")

            with gr.Row():
                auto_apply_presets = gr.Checkbox(value=True, label="Auto-apply presets for sample audios")
                reset_btn = gr.Button("Reset to defaults")

            summary_box = gr.Textbox(label="Configuration Summary", lines=10, interactive=False)

            # --- Events ---
            task_type.change(
                fn=lambda t: gr.update(visible=(t == "Translation")),
                inputs=task_type,
                outputs=lang_target,
                queue=False
            )

            inputs_list = [
                task_type, lang_source, lang_target,
                chunk_secs, left_context_secs, right_context_secs,
                streaming_policy, alignatt_thr, waitk_lagging,
                exclude_sink_frames, xatt_scores_layer, hallucinations_detector
            ]
            for inp in inputs_list:
                inp.change(
                    fn=summarize_config,
                    inputs=inputs_list,
                    outputs=summary_box,
                    queue=False
                )
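            # Illustrative sketch only: summarize_config (app.ui_utils) receives the twelve
            # widgets above in this exact order and returns the text shown in summary_box.
            # A minimal version (never called here) could look like:
            def _summarize_config_sketch(*values):
                labels = [
                    "task_type", "lang_source", "lang_target",
                    "chunk_secs", "left_context_secs", "right_context_secs",
                    "streaming_policy", "alignatt_thr", "waitk_lagging",
                    "exclude_sink_frames", "xatt_scores_layer", "hallucinations_detector",
                ]
                return "\n".join(f"{name}: {value}" for name, value in zip(labels, values))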
            # Apply the preset associated with a sample audio (when auto-apply is enabled)
            main_audio.change(
                fn=apply_preset_if_example,
                inputs=[main_audio, auto_apply_presets],
                outputs=[
                    task_type, lang_source, lang_target,
                    chunk_secs, left_context_secs, right_context_secs,
                    streaming_policy, alignatt_thr, waitk_lagging,
                    exclude_sink_frames, xatt_scores_layer, hallucinations_detector,
                    summary_box
                ],
                queue=False
            )
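            # apply_preset_if_example (app.ui_utils) is expected to return one value per output
            # above (13 in total): preset values when the selected file is one of the
            # EXAMPLE_CONFIGS samples and auto-apply is enabled, otherwise no-op updates.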
            # Reset defaults
            reset_btn.click(
                fn=reset_to_defaults,
                inputs=None,
                outputs=[
                    task_type, lang_source, lang_target,
                    chunk_secs, left_context_secs, right_context_secs,
                    streaming_policy, alignatt_thr, waitk_lagging,
                    exclude_sink_frames, xatt_scores_layer, hallucinations_detector,
                    summary_box
                ],
                queue=False
            )

            go_to_task = gr.Button("Go to Task")
            go_to_task.click(lambda: gr.Walkthrough(selected=3), outputs=walkthrough)

        # === STEP 4 ===
        with gr.Step("Task", id=3) as task_step:
            gr.Markdown("### Step 4: Start the Task")
            with gr.Group():
                with gr.Column():
                    status_slider = gr.Slider(
                        0, 100,
                        value=0,
                        label="Streaming Progress",
                        interactive=False,
                        visible=False
                    )
                    transcription_output = gr.Textbox(
                        label="Transcription / Translation Result",
                        placeholder="The output text will appear here...",
                        lines=10,
                        interactive=False,
                        visible=True
                    )
                    start_task_button = gr.Button("Start Task", visible=True)
                    stop_button = gr.Button("Stop Streaming", visible=False)
                    stop_task_button = gr.Button("Stop Task", visible=False)

            stop_button.click(
                fn=stop_streaming,
                inputs=[session_hash, stop_streaming_flags],
                outputs=[stop_streaming_flags],
            )
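            # Illustrative sketch only: the real stop_streaming lives in app.stream_utils and
            # may also touch the session registry; at minimum it flips the flag the streaming
            # generator polls and hands the dict back to stop_streaming_flags.
            def _stop_streaming_sketch(session_hash, flags):
                flags["stop"] = True  # picked up by the generator on its next chunk
                return flags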
            def stop_task_fn():
                return "Task stopped by user."

            stop_task_button.click(
                fn=stop_task_fn,
                inputs=None,
                outputs=transcription_output
            )

            ui_components = [
                start_button, stop_button,
                go_to_config, audio_source_step, status_slider
            ]
            webrtc_stream.on_additional_outputs(
                fn=handle_additional_outputs,
                outputs=ui_components,
                concurrency_id="additional_outputs_audio_stream",
                concurrency_limit=10,
            )
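            # Illustrative sketch only: inside the streaming generator, fastrtc's
            # AdditionalOutputs(...) can be yielded alongside audio chunks; this callback then
            # receives that payload and must return one update per component in ui_components.
            # Assuming the payload is a 0-100 progress value:
            def _handle_additional_outputs_sketch(progress):
                done = progress >= 100
                return (
                    gr.update(visible=done),                      # start_button
                    gr.update(visible=not done),                  # stop_button
                    gr.update(visible=done),                      # go_to_config
                    gr.update(),                                  # audio_source_step (unchanged)
                    gr.update(value=progress, visible=not done),  # status_slider
                )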
            # def start_transcription(
            #     session_hash, stop_streaming_flags,
            #     task_type, lang_source, lang_target,
            #     chunk_secs, left_context_secs, right_context_secs,
            #     streaming_policy, alignatt_thr, waitk_lagging,
            #     exclude_sink_frames, xatt_scores_layer, hallucinations_detector
            # ):
            #     if task_type == "Translation":
            #         return f"Translation completed ({lang_source} → {lang_target})\n\nTranslated text:\nLorem ipsum..."
            #     else:
            #         return f"Transcription completed ({lang_source})\n\nTranscribed text:\nHello everyone, this is a test audio stream..."
            # start_task_button.click(
            #     fn=start_transcription,
            #     inputs=[
            #         session_hash, stop_streaming_flags,
            #         task_type, lang_source, lang_target,
            #         chunk_secs, left_context_secs, right_context_secs,
            #         streaming_policy, alignatt_thr, waitk_lagging,
            #         exclude_sink_frames, xatt_scores_layer, hallucinations_detector
            #     ],
            #     outputs=transcription_output
            # )
            # def toggle_task_buttons():
            #     return (
            #         gr.update(visible=False),
            #         gr.update(visible=True),
            #         gr.update(visible=True)
            #     )
            # start_task_button.click(
            #     fn=toggle_task_buttons,
            #     inputs=None,
            #     outputs=[start_task_button, stop_task_button, stop_button],
            #     queue=False
            # )
if __name__ == "__main__":
    demo.queue(max_size=10, api_open=False).launch(show_api=False, debug=True)