Archime committed on
Commit
8a8cf97
·
1 Parent(s): 7c21b6e

add Global stop flag

Browse files
Files changed (1) hide show
  1. app.py +57 -94
app.py CHANGED
@@ -5,31 +5,28 @@ import asyncio
5
  from fastrtc.webrtc import WebRTC
6
  from pydub import AudioSegment
7
  import time
8
- import threading
9
- import os # Added to check if file exists
10
- from gradio.utils import get_space
11
  import spaces
 
 
12
 
13
- from app.logger_config import logger as logging
14
- from app.utils import (
15
- generate_coturn_config
16
- )
17
 
18
- # --- Constants and Global State ---
19
  EXAMPLE_FILES = ["data/bonjour.wav", "data/bonjour2.wav"]
20
- # The default file is the first in the list
21
- DEFAULT_FILE = EXAMPLE_FILES[0]
 
22
  streaming_should_stop = threading.Event()
 
 
 
23
  @spaces.GPU
24
  def read_and_stream_audio(filepath_to_stream: str):
25
  """
26
- A synchronous generator that reads an audio file (via filepath_to_stream)
27
- and streams it in 1-second chunks.
28
  """
29
-
30
  if not filepath_to_stream or not os.path.exists(filepath_to_stream):
31
  logging.error(f"Audio file not found or not specified: {filepath_to_stream}")
32
- # Attempt to use the default file as a fallback
33
  if os.path.exists(DEFAULT_FILE):
34
  logging.warning(f"Using default file: {DEFAULT_FILE}")
35
  filepath_to_stream = DEFAULT_FILE
@@ -39,92 +36,79 @@ def read_and_stream_audio(filepath_to_stream: str):
39
 
40
  logging.info(f"Preparing audio segment from: {filepath_to_stream}")
41
  streaming_should_stop.clear()
42
-
43
  try:
44
  segment = AudioSegment.from_file(filepath_to_stream)
45
  chunk_duree_ms = 1000
46
  logging.info(f"Starting streaming in {chunk_duree_ms}ms chunks...")
47
 
48
  for i, chunk in enumerate(segment[::chunk_duree_ms]):
49
- iter_start_time = time.perf_counter()
50
- logging.info(f"Sending chunk {i+1}...")
51
-
52
  if streaming_should_stop.is_set():
53
- logging.info("Stop signal received, breaking loop.")
54
  break
55
 
 
 
 
56
  output_chunk = (
57
  chunk.frame_rate,
58
  np.array(chunk.get_array_of_samples()).reshape(1, -1),
59
  )
60
-
61
  yield output_chunk
62
 
63
- iter_end_time = time.perf_counter()
64
- processing_duration_ms = (iter_end_time - iter_start_time) * 1000
65
-
66
- sleep_duration = (chunk_duree_ms / 1000.0) - (processing_duration_ms / 1000.0) - 0.1
67
- if sleep_duration < 0:
68
- sleep_duration = 0.01 # Avoid negative sleep time
69
-
70
- logging.debug(f"Processing time: {processing_duration_ms:.2f}ms, Sleep: {sleep_duration:.2f}s")
71
-
72
- # Using wait() allows the thread to wake up if the signal is received
73
- if streaming_should_stop.wait(timeout=sleep_duration):
74
- logging.info("Stop signal received while waiting.")
75
- break
76
 
77
- logging.info("Streaming finished.")
78
 
79
  except asyncio.CancelledError:
80
- logging.info("Stream stopped by user (CancelledError).")
81
  raise
82
- except FileNotFoundError:
83
- logging.error(f"Critical error: File not found: {filepath_to_stream}")
84
  except Exception as e:
85
- logging.error(f"Error during stream: {e}", exc_info=True)
86
  raise
87
  finally:
88
  streaming_should_stop.clear()
89
- logging.info("Stop signal cleared.")
90
 
91
 
 
92
  def stop_streaming():
93
- """Activates the stop signal for the generator."""
94
- logging.info("Stop button clicked: sending stop signal.")
95
  streaming_should_stop.set()
96
  return None
97
 
98
- # --- Gradio Interface ---
99
 
 
100
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
101
  gr.Markdown(
102
- "## Application 'Streamer' WebRTC (Serveur -> Client)\n"
103
- "Utilisez l'exemple fourni, uploadez un fichier ou enregistrez depuis votre micro, "
104
- "puis cliquez sur 'Start' pour écouter le stream."
105
  )
106
 
107
- # 1. State to store the path of the file to be read
108
  active_filepath = gr.State(value=DEFAULT_FILE)
109
 
110
  with gr.Row():
111
  with gr.Column():
112
  main_audio = gr.Audio(
113
  label="Source Audio",
114
- sources=["upload", "microphone"], # Combine both sources
115
  type="filepath",
116
- value=DEFAULT_FILE, # Default to the first example
117
  )
118
  with gr.Column():
119
  webrtc_stream = WebRTC(
120
- label="Stream Audio",
121
  mode="receive",
122
  modality="audio",
123
  rtc_configuration=generate_coturn_config(),
124
  visible=True,
125
- height = 200,
126
  )
127
- # 4. Control buttons
128
  with gr.Row():
129
  with gr.Column():
130
  start_button = gr.Button("Start Streaming", variant="primary")
@@ -133,38 +117,21 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
133
  gr.Text()
134
 
135
  def set_new_file(filepath):
136
- """Updates the state with the new path, or reverts to default if None."""
137
  if filepath is None:
138
  logging.info("Audio cleared, reverting to default example file.")
139
- new_path = DEFAULT_FILE
140
- else:
141
- logging.info(f"New audio source selected: {filepath}")
142
- new_path = filepath
143
- # Returns the value to be put in the gr.State
144
- return new_path
145
-
146
- # Update the path if the user uploads, clears, or changes the file
147
- main_audio.change(
148
- fn=set_new_file,
149
- inputs=[main_audio],
150
- outputs=[active_filepath]
151
- )
152
-
153
- # Update the path if the user finishes a recording
154
- main_audio.stop_recording(
155
- fn=set_new_file,
156
- inputs=[main_audio],
157
- outputs=[active_filepath]
158
- )
159
 
 
 
160
 
161
- # Functions to update the interface state
162
  def start_streaming_ui():
163
  logging.info("UI: Starting stream. Disabling controls.")
164
  return {
165
  start_button: gr.Button(interactive=False),
166
  stop_button: gr.Button(interactive=True),
167
- main_audio: gr.Audio(visible=False),
168
  }
169
 
170
  def stop_streaming_ui():
@@ -174,42 +141,38 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
174
  stop_button: gr.Button(interactive=False),
175
  main_audio: gr.Audio(
176
  label="Source Audio",
177
- sources=["upload", "microphone"], # Combine both sources
178
  type="filepath",
179
  value=active_filepath.value,
180
- visible=True
181
- ),
182
- }
183
-
184
 
185
- ui_components = [
186
- start_button, stop_button,
187
- main_audio,
188
- ]
189
 
 
190
  stream_event = webrtc_stream.stream(
191
  fn=read_and_stream_audio,
192
- inputs=[active_filepath],
193
  outputs=[webrtc_stream],
194
  trigger=start_button.click,
195
- concurrency_id="audio_stream", # Concurrency ID
196
- concurrency_limit=10
197
  )
198
 
199
- # Update the interface on START click
200
  start_button.click(
201
  fn=start_streaming_ui,
202
- outputs=ui_components
203
  )
204
 
205
- # Fix: Ensure the stream is properly cancelled
206
  stop_button.click(
207
- fn=stop_streaming,
208
- outputs=[webrtc_stream],
209
  ).then(
210
- fn=stop_streaming_ui, # THEN, update the interface
211
- inputs=None,
212
- outputs=ui_components
213
  )
214
 
215
 
 
5
  from fastrtc.webrtc import WebRTC
6
  from pydub import AudioSegment
7
  import time
8
+ import os
 
 
9
  import spaces
10
+ import threading
11
+ from app.utils import generate_coturn_config
12
 
 
 
 
 
13
 
14
+ # --- Constants ---
15
  EXAMPLE_FILES = ["data/bonjour.wav", "data/bonjour2.wav"]
16
+ DEFAULT_FILE = EXAMPLE_FILES[0]
17
+
18
+ # --- Global stop flag ---
19
  streaming_should_stop = threading.Event()
20
+
21
+
22
+ # --- Audio Stream Function ---
23
  @spaces.GPU
24
  def read_and_stream_audio(filepath_to_stream: str):
25
  """
26
+ Stream an audio file in 1-second chunks until stop signal is received.
 
27
  """
 
28
  if not filepath_to_stream or not os.path.exists(filepath_to_stream):
29
  logging.error(f"Audio file not found or not specified: {filepath_to_stream}")
 
30
  if os.path.exists(DEFAULT_FILE):
31
  logging.warning(f"Using default file: {DEFAULT_FILE}")
32
  filepath_to_stream = DEFAULT_FILE
 
36
 
37
  logging.info(f"Preparing audio segment from: {filepath_to_stream}")
38
  streaming_should_stop.clear()
39
+
40
  try:
41
  segment = AudioSegment.from_file(filepath_to_stream)
42
  chunk_duree_ms = 1000
43
  logging.info(f"Starting streaming in {chunk_duree_ms}ms chunks...")
44
 
45
  for i, chunk in enumerate(segment[::chunk_duree_ms]):
 
 
 
46
  if streaming_should_stop.is_set():
47
+ logging.info("Stop flag detected, ending stream.")
48
  break
49
 
50
+ iter_start = time.perf_counter()
51
+ logging.info(f"Sending chunk {i+1}...")
52
+
53
  output_chunk = (
54
  chunk.frame_rate,
55
  np.array(chunk.get_array_of_samples()).reshape(1, -1),
56
  )
 
57
  yield output_chunk
58
 
59
+ processing_duration_ms = (time.perf_counter() - iter_start) * 1000
60
+ sleep_duration = max((chunk_duree_ms / 1000.0) - (processing_duration_ms / 1000.0) - 0.1, 0.01)
61
+ time.sleep(sleep_duration)
 
 
 
 
 
 
 
 
 
 
62
 
63
+ logging.info("Streaming finished successfully.")
64
 
65
  except asyncio.CancelledError:
66
+ logging.info("Stream cancelled by user.")
67
  raise
 
 
68
  except Exception as e:
69
+ logging.error(f"Error during audio streaming: {e}", exc_info=True)
70
  raise
71
  finally:
72
  streaming_should_stop.clear()
73
+ logging.info("Stop flag cleared.")
74
 
75
 
76
+ # --- Stop Function ---
77
  def stop_streaming():
78
+ """Set the stop flag to True."""
79
+ logging.info("Stop button clicked: setting stop flag.")
80
  streaming_should_stop.set()
81
  return None
82
 
 
83
 
84
+ # --- Gradio Interface ---
85
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
86
  gr.Markdown(
87
+ "## Application 'Streamer' WebRTC (Serveur Client)\n"
88
+ "Chargez un fichier, enregistrez depuis votre micro ou utilisez un exemple, "
89
+ "puis cliquez sur **Start Streaming** pour écouter le flux en direct."
90
  )
91
 
 
92
  active_filepath = gr.State(value=DEFAULT_FILE)
93
 
94
  with gr.Row():
95
  with gr.Column():
96
  main_audio = gr.Audio(
97
  label="Source Audio",
98
+ sources=["upload", "microphone"],
99
  type="filepath",
100
+ value=DEFAULT_FILE,
101
  )
102
  with gr.Column():
103
  webrtc_stream = WebRTC(
104
+ label="Flux Audio",
105
  mode="receive",
106
  modality="audio",
107
  rtc_configuration=generate_coturn_config(),
108
  visible=True,
109
+ height=200,
110
  )
111
+
112
  with gr.Row():
113
  with gr.Column():
114
  start_button = gr.Button("Start Streaming", variant="primary")
 
117
  gr.Text()
118
 
119
  def set_new_file(filepath):
 
120
  if filepath is None:
121
  logging.info("Audio cleared, reverting to default example file.")
122
+ return DEFAULT_FILE
123
+ logging.info(f"New audio source selected: {filepath}")
124
+ return filepath
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
 
126
+ main_audio.change(fn=set_new_file, inputs=[main_audio], outputs=[active_filepath])
127
+ main_audio.stop_recording(fn=set_new_file, inputs=[main_audio], outputs=[active_filepath])
128
 
 
129
  def start_streaming_ui():
130
  logging.info("UI: Starting stream. Disabling controls.")
131
  return {
132
  start_button: gr.Button(interactive=False),
133
  stop_button: gr.Button(interactive=True),
134
+ main_audio: gr.Audio(visible=False),
135
  }
136
 
137
  def stop_streaming_ui():
 
141
  stop_button: gr.Button(interactive=False),
142
  main_audio: gr.Audio(
143
  label="Source Audio",
144
+ sources=["upload", "microphone"],
145
  type="filepath",
146
  value=active_filepath.value,
147
+ visible=True,
148
+ ),
149
+ }
 
150
 
151
+ ui_components = [start_button, stop_button, main_audio]
 
 
 
152
 
153
+ # --- Stream event ---
154
  stream_event = webrtc_stream.stream(
155
  fn=read_and_stream_audio,
156
+ inputs=[active_filepath],
157
  outputs=[webrtc_stream],
158
  trigger=start_button.click,
159
+ concurrency_id="audio_stream",
160
+ concurrency_limit=10,
161
  )
162
 
163
+ # --- Button actions ---
164
  start_button.click(
165
  fn=start_streaming_ui,
166
+ outputs=ui_components,
167
  )
168
 
169
+ # Stop streaming instantly (thread-safe flag)
170
  stop_button.click(
171
+ fn=stop_streaming,
172
+ outputs=[webrtc_stream],
173
  ).then(
174
+ fn=stop_streaming_ui,
175
+ outputs=ui_components,
 
176
  )
177
 
178