Archime committed on
Commit
d1f7785
·
1 Parent(s): 70d2ece

add slider process

Browse files
Files changed (1) hide show
  1. new_app.py +71 -16
new_app.py CHANGED
@@ -3,6 +3,7 @@ import numpy as np
3
  import gradio as gr
4
  import asyncio
5
  from fastrtc.webrtc import WebRTC
 
6
  from pydub import AudioSegment
7
  import time
8
  import os
@@ -12,7 +13,7 @@ from app.logger_config import logger as logging
12
  from app.utils import (
13
  generate_coturn_config
14
  )
15
- from app.session_utils import (
16
  on_load,
17
  on_unload,
18
  get_active_sessions,
@@ -57,6 +58,7 @@ def read_and_stream_audio(filepath_to_stream: str, session_id: str, stop_streami
57
  try:
58
  segment = AudioSegment.from_file(filepath_to_stream)
59
  chunk_duree_ms = 1000
 
60
  logging.info(f"[{session_id}] Début du streaming en chunks de {chunk_duree_ms}ms...")
61
 
62
  for i, chunk in enumerate(segment[::chunk_duree_ms]):
@@ -71,8 +73,12 @@ def read_and_stream_audio(filepath_to_stream: str, session_id: str, stop_streami
71
  chunk.frame_rate,
72
  np.array(chunk.get_array_of_samples()).reshape(1, -1),
73
  )
 
 
 
74
 
75
- yield output_chunk
 
76
 
77
  iter_end_time = time.perf_counter()
78
  processing_duration_ms = (iter_end_time - iter_start_time) * 1000
@@ -109,6 +115,8 @@ def read_and_stream_audio(filepath_to_stream: str, session_id: str, stop_streami
109
  if isinstance(stop_streaming_flags, dict):
110
  stop_streaming_flags["stop"] = False
111
  logging.info(f"[{session_id}]Signal d'arrêt nettoyé.")
 
 
112
 
113
 
114
  def stop_streaming(session_id: str, stop_streaming_flags: dict):
@@ -120,6 +128,23 @@ def stop_streaming(session_id: str, stop_streaming_flags: dict):
120
  stop_streaming_flags["stop"] = True
121
  return stop_streaming_flags
122
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
  # --- Interface Gradio ---
124
 
125
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
@@ -131,10 +156,6 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
131
 
132
  stop_streaming_flags = gr.State(value={"stop": False})
133
 
134
-
135
-
136
-
137
-
138
  gr.Markdown(
139
  "## Application 'Streamer' WebRTC (Serveur -> Client)\n"
140
  "Utilisez l'exemple fourni, uploadez un fichier ou enregistrez depuis votre micro, "
@@ -144,14 +165,17 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
144
  # 1. État pour stocker le chemin du fichier à lire
145
  active_filepath = gr.State(value=DEFAULT_FILE)
146
 
147
- with gr.Row():
148
- with gr.Column():
149
- main_audio = gr.Audio(
150
- label="Source Audio",
151
- sources=["upload", "microphone"], # Combine les deux sources
152
- type="filepath",
153
- value=DEFAULT_FILE, # Défaut au premier exemple
154
- )
 
 
 
155
  with gr.Column():
156
  webrtc_stream = WebRTC(
157
  label="Stream Audio",
@@ -234,10 +258,16 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
234
  inputs=[active_filepath, session_hash, stop_streaming_flags],
235
  outputs=[webrtc_stream],
236
  trigger=start_button.click,
237
- concurrency_id="audio_stream", # ID de concurrence
 
 
 
 
 
 
 
238
  concurrency_limit=10
239
  )
240
-
241
  # Mettre à jour l'interface au clic sur START
242
  start_button.click(
243
  fn=start_streaming_ui,
@@ -266,5 +296,30 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
266
 
267
  gr.Timer(3.0).tick(fn=get_active_sessions, outputs=sessions_table)
268
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
269
  if __name__ == "__main__":
270
  demo.queue(max_size=10, api_open=False).launch(show_api=False, debug=True)
 
3
  import gradio as gr
4
  import asyncio
5
  from fastrtc.webrtc import WebRTC
6
+ from fastrtc.utils import AdditionalOutputs
7
  from pydub import AudioSegment
8
  import time
9
  import os
 
13
  from app.utils import (
14
  generate_coturn_config
15
  )
16
+ from app.new_session_utils import (
17
  on_load,
18
  on_unload,
19
  get_active_sessions,
 
58
  try:
59
  segment = AudioSegment.from_file(filepath_to_stream)
60
  chunk_duree_ms = 1000
61
+ total_chunks = len(segment) // chunk_duree_ms + 1
62
  logging.info(f"[{session_id}] Début du streaming en chunks de {chunk_duree_ms}ms...")
63
 
64
  for i, chunk in enumerate(segment[::chunk_duree_ms]):
 
73
  chunk.frame_rate,
74
  np.array(chunk.get_array_of_samples()).reshape(1, -1),
75
  )
76
+ # Calcul du pourcentage de progression
77
+ progress = round(((i + 1) / total_chunks) * 100, 2)
78
+ logging.debug(f"[{session_id}] Progression: {progress}%")
79
 
80
+ # Envoi du chunk et de la progression numérique
81
+ yield (output_chunk, AdditionalOutputs(progress))
82
 
83
  iter_end_time = time.perf_counter()
84
  processing_duration_ms = (iter_end_time - iter_start_time) * 1000
 
115
  if isinstance(stop_streaming_flags, dict):
116
  stop_streaming_flags["stop"] = False
117
  logging.info(f"[{session_id}]Signal d'arrêt nettoyé.")
118
+ yield (None, AdditionalOutputs({"progress": 100}))
119
+
120
 
121
 
122
  def stop_streaming(session_id: str, stop_streaming_flags: dict):
 
128
  stop_streaming_flags["stop"] = True
129
  return stop_streaming_flags
130
 
131
+ def handle_additional_outputs(status_slider,progress_value):
132
+ """Met à jour le slider selon la valeur reçue et gère sa visibilité."""
133
+ logging.debug(f"📡 Additional output received: {progress_value}")
134
+
135
+ try:
136
+ progress = float(progress_value)
137
+ except (ValueError, TypeError):
138
+ progress = 0
139
+ # status_slider = gr.update(interactive=True,visible=True, value=max(0, min(progress, 100)))
140
+ # return status_slider
141
+ if progress >= 100:
142
+ return gr.update(visible=False, value=100)
143
+ elif progress <= 0:
144
+ return gr.update(visible=False, value=0)
145
+ else:
146
+ return gr.update(visible=True, value=progress)
147
+
148
  # --- Interface Gradio ---
149
 
150
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
 
156
 
157
  stop_streaming_flags = gr.State(value={"stop": False})
158
 
 
 
 
 
159
  gr.Markdown(
160
  "## Application 'Streamer' WebRTC (Serveur -> Client)\n"
161
  "Utilisez l'exemple fourni, uploadez un fichier ou enregistrez depuis votre micro, "
 
165
  # 1. État pour stocker le chemin du fichier à lire
166
  active_filepath = gr.State(value=DEFAULT_FILE)
167
 
168
+ with gr.Row(equal_height=True):
169
+ with gr.Column(elem_id="column_source", scale=1):
170
+ with gr.Group(elem_id="centered_content"):
171
+ main_audio = gr.Audio(
172
+ label="Source Audio",
173
+ sources=["upload", "microphone"], # Combine les deux sources
174
+ type="filepath",
175
+ value=DEFAULT_FILE, # Défaut au premier exemple
176
+ )
177
+ status_slider = gr.Slider(0, 100, value=0, label="Progression du streaming", interactive=False, visible=False )
178
+
179
  with gr.Column():
180
  webrtc_stream = WebRTC(
181
  label="Stream Audio",
 
258
  inputs=[active_filepath, session_hash, stop_streaming_flags],
259
  outputs=[webrtc_stream],
260
  trigger=start_button.click,
261
+ concurrency_id="audio_stream",
262
+ concurrency_limit=10
263
+ )
264
+ webrtc_stream.on_additional_outputs(
265
+ fn=handle_additional_outputs,
266
+ inputs=[status_slider],
267
+ outputs=[status_slider],
268
+ concurrency_id="additional_outputs_audio_stream",
269
  concurrency_limit=10
270
  )
 
271
  # Mettre à jour l'interface au clic sur START
272
  start_button.click(
273
  fn=start_streaming_ui,
 
296
 
297
  gr.Timer(3.0).tick(fn=get_active_sessions, outputs=sessions_table)
298
 
299
+
300
+
301
+ # --------------------------------------------------------
302
+ # CSS
303
+ # --------------------------------------------------------
304
+ custom_css = """
305
+ #column_source {
306
+ display: flex;
307
+ flex-direction: column;
308
+ justify-content: center;
309
+ align-items: center;
310
+ gap: 1rem;
311
+ margin-top: auto;
312
+ margin-bottom: auto;
313
+ }
314
+ #column_source .gr-row {
315
+ padding-top: 12px;
316
+ padding-bottom: 12px;
317
+ }
318
+ """
319
+ demo.css = custom_css
320
+ # --------------------------------------------------------
321
+ # MAIN
322
+ # --------------------------------------------------------
323
+
324
  if __name__ == "__main__":
325
  demo.queue(max_size=10, api_open=False).launch(show_api=False, debug=True)