Archime commited on
Commit
703ca2c
Β·
1 Parent(s): 9f80a29

add logic TRANSCRIPTION

Browse files
Files changed (2) hide show
  1. app.py +182 -85
  2. app/session_utils.py +20 -0
app.py CHANGED
@@ -29,87 +29,131 @@ EXAMPLE_FILES = ["data/bonjour.wav", "data/bonjour2.wav"]
29
  DEFAULT_FILE = EXAMPLE_FILES[0]
30
 
31
 
32
- @spaces.GPU
33
- def read_and_stream_audio(filepath_to_stream: str, session_id: str):
34
- """Stream audio chunks for a specific session."""
35
- stop_file = stop_file_path(session_id)
 
 
 
 
36
  logging.debug(f"[{session_id}] read_and_stream_audio() started with file: {filepath_to_stream}")
37
 
38
  if not filepath_to_stream or not os.path.exists(filepath_to_stream):
39
  logging.error(f"[{session_id}] Audio file not found: {filepath_to_stream}")
40
- if os.path.exists(DEFAULT_FILE):
41
- filepath_to_stream = DEFAULT_FILE
42
- logging.warning(f"[{session_id}] Using default file: {DEFAULT_FILE}")
43
- else:
44
- logging.error(f"[{session_id}] Default file missing. Aborting.")
45
- return
46
-
47
  clear_stop_flag(session_id)
48
  register_session(session_id, filepath_to_stream)
49
- progress_path = os.path.join(TMP_DIR, f"progress_{session_id}.json")
 
 
50
 
51
  try:
52
  segment = AudioSegment.from_file(filepath_to_stream)
53
- chunk_ms = 1000
54
  total_chunks = len(segment) // chunk_ms + 1
55
- logging.info(f"[{session_id}] Streaming {total_chunks} chunks...")
 
56
 
57
  for i, chunk in enumerate(segment[::chunk_ms], start=1):
58
  if os.path.exists(stop_file):
59
- logging.info(f"[{session_id}] Stop flag detected at chunk {i}. Stopping.")
60
  clear_stop_flag(session_id)
61
  break
62
 
63
  iter_start = time.perf_counter()
64
- logging.debug(f"[{session_id}] Sending chunk {i}/{total_chunks}...")
65
 
66
- # Compute elapsed time (hh:mm:ss)
67
- elapsed_s = i * (chunk_ms / 1000)
68
  hours, remainder = divmod(int(elapsed_s), 3600)
69
  minutes, seconds = divmod(remainder, 60)
70
  elapsed_str = f"{hours:02d}:{minutes:02d}:{seconds:02d}"
71
 
72
- # Compute percentage
73
  percent = round((i / total_chunks) * 100, 2)
74
-
75
- # Save progress info
76
- progress_data = {
77
- "value": percent,
78
- "elapsed": elapsed_str,
79
- "text": f"Streaming... {elapsed_str} ({percent}%)"
80
- }
81
  with open(progress_path, "w") as f:
82
  json.dump(progress_data, f)
 
 
83
 
84
- # Stream chunk
85
- output_chunk = (
86
- chunk.frame_rate,
87
- np.array(chunk.get_array_of_samples()).reshape(1, -1),
88
- )
89
- yield output_chunk
90
 
91
- process_ms = (time.perf_counter() - iter_start) * 1000
92
- time.sleep(max((chunk_ms / 1000.0) - (process_ms / 1000.0) - 0.1, 0.01))
93
 
94
- with open(progress_path, "w") as f:
95
- json.dump({"value": 100.0, "elapsed": elapsed_str, "text": "Streaming completed βœ…"}, f)
96
 
97
- logging.info(f"[{session_id}] Stream completed successfully.")
98
 
99
- except asyncio.CancelledError:
100
- logging.info(f"[{session_id}] Stream cancelled by user.")
101
- raise
102
  except Exception as e:
103
  logging.error(f"[{session_id}] Stream error: {e}", exc_info=True)
104
- raise
105
  finally:
106
  unregister_session(session_id)
107
  clear_stop_flag(session_id)
108
  if os.path.exists(progress_path):
109
  os.remove(progress_path)
110
- logging.debug(f"[{session_id}] Stream closed.")
111
 
112
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
113
  def stop_streaming(session_id: str):
114
  create_stop_flag(session_id)
115
  logging.info(f"[{session_id}] Stop button clicked β†’ stop flag created.")
@@ -127,18 +171,20 @@ def get_session_progress(session_id: str):
127
  value = data.get("value", 0.0)
128
  elapsed = data.get("elapsed", "00:00:00")
129
  return value, elapsed
130
- except Exception as e:
131
- logging.error(f"[{session_id}] Progress read error: {e}")
132
  return 0.0, "00:00:00"
133
 
134
 
 
 
 
135
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
136
  gr.Markdown(
137
  "## 🎧 WebRTC Audio Streamer (Multi-user)\n"
138
- "Each user controls their own audio stream with elapsed time and percentage progress."
139
  )
140
 
141
- session_id = gr.State(value=generate_session_id)
142
  active_filepath = gr.State(value=DEFAULT_FILE)
143
 
144
  with gr.Row(equal_height=True):
@@ -151,6 +197,15 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
151
  value=DEFAULT_FILE,
152
  )
153
 
 
 
 
 
 
 
 
 
 
154
  progress_bar = gr.Slider(
155
  label="Streaming Progress (%)",
156
  minimum=0,
@@ -165,14 +220,11 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
165
  label="Elapsed Time (hh:mm:ss)",
166
  interactive=False,
167
  visible=False,
168
- show_label=False
169
  )
170
 
171
  with gr.Row():
172
- with gr.Column(scale=1, min_width=0):
173
- start_button = gr.Button("▢️ Start Streaming", variant="primary")
174
- with gr.Column(scale=1, min_width=0):
175
- stop_button = gr.Button("⏹️ Stop Streaming", variant="stop", interactive=False)
176
 
177
  with gr.Column():
178
  webrtc_stream = WebRTC(
@@ -183,80 +235,121 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
183
  visible=True,
184
  )
185
 
186
- def set_new_file(filepath):
187
- return filepath if filepath else DEFAULT_FILE
188
-
189
- main_audio.change(fn=set_new_file, inputs=[main_audio], outputs=[active_filepath])
190
- main_audio.stop_recording(fn=set_new_file, inputs=[main_audio], outputs=[active_filepath])
191
 
 
192
  def start_streaming_ui(session_id):
193
- logging.debug(f"[{session_id}] UI: Start clicked β†’ disabling controls.")
194
  return {
195
  start_button: gr.Button(interactive=False),
196
  stop_button: gr.Button(interactive=True),
 
 
 
197
  main_audio: gr.Audio(visible=False),
198
  progress_bar: gr.Slider(value=0, visible=True),
199
  progress_text: gr.Textbox(value="00:00:00", visible=True),
200
  }
201
 
202
  def stop_streaming_ui(session_id):
203
- logging.debug(f"[{session_id}] UI: Stop clicked or finished β†’ restoring controls.")
 
204
  return {
205
  start_button: gr.Button(interactive=True),
206
  stop_button: gr.Button(interactive=False),
207
- main_audio: gr.Audio(
208
- label="Audio Source",
209
- sources=["upload", "microphone"],
210
- type="filepath",
211
- value=DEFAULT_FILE,
212
- visible=True,
213
- ),
214
  progress_bar: gr.Slider(value=0, visible=False),
215
  progress_text: gr.Textbox(value="00:00:00", visible=False),
216
  }
217
 
218
- ui_components = [start_button, stop_button, main_audio, progress_bar, progress_text]
219
-
220
  webrtc_stream.stream(
221
  fn=read_and_stream_audio,
222
- inputs=[active_filepath, session_id],
223
  outputs=[webrtc_stream],
224
  trigger=start_button.click,
225
- concurrency_id="audio_stream",
226
- concurrency_limit=20,
227
  )
228
 
229
- start_button.click(
230
- fn=start_streaming_ui,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
231
  inputs=[session_id],
232
- outputs=ui_components,
233
  )
234
 
235
- stop_button.click(
236
- fn=stop_streaming,
 
237
  inputs=[session_id],
238
- outputs=[webrtc_stream],
239
- ).then(
240
- fn=stop_streaming_ui,
 
 
241
  inputs=[session_id],
242
- outputs=ui_components,
243
  )
244
 
 
245
  with gr.Accordion("πŸ“Š Active Sessions", open=False):
246
  sessions_table = gr.DataFrame(
247
  headers=["session_id", "file", "start_time", "status"],
248
  interactive=False,
249
  wrap=True,
250
- label="Connected Users",
251
  max_height=200,
252
  )
253
 
254
- timer = gr.Timer(3.0)
255
- timer.tick(fn=get_active_sessions, outputs=sessions_table)
256
-
257
- progress_timer = gr.Timer(1.0)
258
- progress_timer.tick(fn=get_session_progress, inputs=[session_id], outputs=[progress_bar, progress_text])
259
 
 
 
 
260
  custom_css = """
261
  #column_source {
262
  display: flex;
@@ -274,5 +367,9 @@ custom_css = """
274
  """
275
  demo.css = custom_css
276
 
 
 
 
 
277
  if __name__ == "__main__":
278
  demo.queue(max_size=50, api_open=False).launch(show_api=False, debug=True)
 
29
  DEFAULT_FILE = EXAMPLE_FILES[0]
30
 
31
 
32
# --------------------------------------------------------
# STREAMING
# --------------------------------------------------------
def read_and_stream_audio(filepath_to_stream: str, session_id: str, chunk_seconds: float):
    """Stream audio chunks and save .npz files only when transcription is active.

    Args:
        filepath_to_stream: Path to the audio file to stream.
        session_id: Per-user session identifier; namespaces the flag/progress files.
        chunk_seconds: Duration of each streamed chunk, in seconds.

    Yields:
        ``(sample_rate, samples)`` tuples, where ``samples`` is a (1, n) int16 ndarray.
    """
    # Per-session control files: the stop flag ends the stream early; the
    # transcribe flag enables chunk persistence for the transcription loop.
    stop_file = os.path.join(TMP_DIR, f"stream_stop_flag_{session_id}.txt")
    transcribe_flag = os.path.join(TMP_DIR, f"transcribe_active_{session_id}.txt")

    logging.debug(f"[{session_id}] read_and_stream_audio() started with file: {filepath_to_stream}")

    if not filepath_to_stream or not os.path.exists(filepath_to_stream):
        logging.error(f"[{session_id}] Audio file not found: {filepath_to_stream}")
        return

    clear_stop_flag(session_id)
    register_session(session_id, filepath_to_stream)
    progress_path = os.path.join(TMP_DIR, f"progress_{session_id}.json")
    chunk_dir = os.path.join(TMP_DIR, f"chunks_{session_id}")
    os.makedirs(chunk_dir, exist_ok=True)

    try:
        segment = AudioSegment.from_file(filepath_to_stream)
        # FIX: clamp to at least 1 ms. chunk_seconds < 0.001 would make
        # chunk_ms == 0, crashing both the // division below and the
        # zero-step slice segment[::chunk_ms].
        chunk_ms = max(1, int(chunk_seconds * 1000))
        total_chunks = len(segment) // chunk_ms + 1

        logging.info(f"[{session_id}] Streaming {total_chunks} chunks ({chunk_seconds:.2f}s each)...")

        for i, chunk in enumerate(segment[::chunk_ms], start=1):
            # Cooperative cancellation: the stop button writes this file.
            if os.path.exists(stop_file):
                logging.info(f"[{session_id}] Stop flag detected at chunk {i}. Ending stream.")
                clear_stop_flag(session_id)
                break

            iter_start = time.perf_counter()

            # Elapsed wall-clock position in the audio (hh:mm:ss).
            elapsed_s = i * chunk_seconds
            hours, remainder = divmod(int(elapsed_s), 3600)
            minutes, seconds = divmod(remainder, 60)
            elapsed_str = f"{hours:02d}:{minutes:02d}:{seconds:02d}"

            # Persist progress for the polling UI timer to pick up.
            percent = round((i / total_chunks) * 100, 2)
            progress_data = {"value": percent, "elapsed": elapsed_str}
            with open(progress_path, "w") as f:
                json.dump(progress_data, f)
            chunk_array = np.array(chunk.get_array_of_samples(), dtype=np.int16)
            rate = chunk.frame_rate

            # Save only if transcription is active
            if os.path.exists(transcribe_flag):
                npz_path = os.path.join(chunk_dir, f"chunk_{i:05d}.npz")
                np.savez_compressed(npz_path, data=chunk_array, rate=rate)
                logging.debug(f"[{session_id}] Saved chunk {i}/{total_chunks} (transcribe active)")

            # Stream audio to client
            yield (rate, chunk_array.reshape(1, -1))

            # Pace the stream to roughly real time, compensating for the time
            # spent processing this iteration; never sleep less than 10 ms.
            process_ms = (time.perf_counter() - iter_start) * 1000
            time.sleep(max(chunk_seconds - (process_ms / 1000.0) - 0.1, 0.01))

        logging.info(f"[{session_id}] Streaming completed successfully.")

    except Exception as e:
        # Best-effort: log and fall through to cleanup rather than crash the UI.
        logging.error(f"[{session_id}] Stream error: {e}", exc_info=True)
    finally:
        unregister_session(session_id)
        clear_stop_flag(session_id)
        if os.path.exists(progress_path):
            os.remove(progress_path)
 
100
 
101
# --------------------------------------------------------
# TRANSCRIPTION
# --------------------------------------------------------
@spaces.GPU
def transcribe(session_id: str):
    """Continuously read and delete .npz chunks while transcription is active.

    Polls ``chunks_<session_id>/`` for chunk files as long as the per-session
    ``transcribe_active`` flag file exists, processes each chunk in filename
    order, and deletes it afterwards.

    Args:
        session_id: Per-user session identifier; selects the flag and chunk dir.
    """
    active_flag = os.path.join(TMP_DIR, f"transcribe_active_{session_id}.txt")
    chunk_dir = os.path.join(TMP_DIR, f"chunks_{session_id}")

    # The streaming loop creates chunk_dir; if it is absent there is nothing
    # to consume, so bail out instead of spinning.
    if not os.path.exists(chunk_dir):
        logging.warning(f"[{session_id}] No chunk directory found for transcription.")
        return

    logging.info(f"[{session_id}] Transcription loop started.")
    try:
        while os.path.exists(active_flag):
            # Sorted so chunk_00001, chunk_00002, ... are processed in order.
            files = sorted(f for f in os.listdir(chunk_dir) if f.endswith(".npz"))
            if not files:
                time.sleep(0.25)
                continue

            for fname in files:
                fpath = os.path.join(chunk_dir, fname)
                try:
                    # FIX: np.load returns an NpzFile holding the archive open;
                    # use the context manager so the handle is closed before
                    # os.remove (required on Windows, avoids an fd leak anywhere).
                    with np.load(fpath) as npz:
                        samples = npz["data"]
                        rate = int(npz["rate"])

                    text = f"Transcribed {fname}: {len(samples)} samples @ {rate}Hz"
                    logging.debug(f"[{session_id}] {text}")

                    os.remove(fpath)
                    logging.debug(f"[{session_id}] Deleted processed chunk: {fname}")
                except Exception as e:
                    # Skip a bad/partial chunk but keep the loop alive.
                    logging.error(f"[{session_id}] Error processing {fname}: {e}")
                    continue

            time.sleep(0.25)

        logging.info(f"[{session_id}] Transcription loop ended (flag removed).")

    except Exception as e:
        logging.error(f"[{session_id}] Transcription error: {e}", exc_info=True)
    finally:
        # Remove the chunk dir only if it is empty; leftover chunks are kept
        # so a restarted transcription can pick them up.
        try:
            if os.path.exists(chunk_dir) and not os.listdir(chunk_dir):
                os.rmdir(chunk_dir)
                logging.debug(f"[{session_id}] Cleaned up empty chunk dir.")
        except Exception as e:
            logging.error(f"[{session_id}] Cleanup error: {e}")
        logging.info(f"[{session_id}] Exiting transcription loop.")
153
+
154
# --------------------------------------------------------
# STOP STREAMING
# --------------------------------------------------------
def stop_streaming(session_id: str):
    """Request termination of this session's stream.

    Writes the per-session stop-flag file; the streaming loop polls for it
    once per chunk and exits when it appears.
    """
    create_stop_flag(session_id)
    logging.info(f"[{session_id}] Stop button clicked β†’ stop flag created.")
 
171
  value = data.get("value", 0.0)
172
  elapsed = data.get("elapsed", "00:00:00")
173
  return value, elapsed
174
+ except Exception:
 
175
  return 0.0, "00:00:00"
176
 
177
 
178
+ # --------------------------------------------------------
179
+ # UI
180
+ # --------------------------------------------------------
181
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
182
  gr.Markdown(
183
  "## 🎧 WebRTC Audio Streamer (Multi-user)\n"
184
+ "Each user controls their own stream. Transcription runs only during streaming."
185
  )
186
 
187
+ session_id = gr.State(value=generate_session_id())
188
  active_filepath = gr.State(value=DEFAULT_FILE)
189
 
190
  with gr.Row(equal_height=True):
 
197
  value=DEFAULT_FILE,
198
  )
199
 
200
+ chunk_slider = gr.Slider(
201
+ label="Chunk Duration (seconds)",
202
+ minimum=0.5,
203
+ maximum=5.0,
204
+ value=1.0,
205
+ step=0.5,
206
+ interactive=True,
207
+ )
208
+
209
  progress_bar = gr.Slider(
210
  label="Streaming Progress (%)",
211
  minimum=0,
 
220
  label="Elapsed Time (hh:mm:ss)",
221
  interactive=False,
222
  visible=False,
 
223
  )
224
 
225
  with gr.Row():
226
+ start_button = gr.Button("▢️ Start Streaming", variant="primary")
227
+ stop_button = gr.Button("⏹️ Stop Streaming", variant="stop", interactive=False)
 
 
228
 
229
  with gr.Column():
230
  webrtc_stream = WebRTC(
 
235
  visible=True,
236
  )
237
 
238
+ # --- Transcription Controls ---
239
+ with gr.Row(equal_height=True):
240
+ with gr.Column():
241
+ start_transcribe = gr.Button("πŸŽ™οΈ Start Transcribe", interactive=False)
242
+ stop_transcribe = gr.Button("πŸ›‘ Stop Transcribe", interactive=False)
243
 
244
+ # --- UI Logic ---
245
  def start_streaming_ui(session_id):
 
246
  return {
247
  start_button: gr.Button(interactive=False),
248
  stop_button: gr.Button(interactive=True),
249
+ start_transcribe: gr.Button(interactive=True),
250
+ stop_transcribe: gr.Button(interactive=False),
251
+ chunk_slider: gr.Slider(interactive=False),
252
  main_audio: gr.Audio(visible=False),
253
  progress_bar: gr.Slider(value=0, visible=True),
254
  progress_text: gr.Textbox(value="00:00:00", visible=True),
255
  }
256
 
257
  def stop_streaming_ui(session_id):
258
+ logging.debug(f"[{session_id}] UI: Stop clicked β†’ restoring controls.")
259
+
260
  return {
261
  start_button: gr.Button(interactive=True),
262
  stop_button: gr.Button(interactive=False),
263
+ start_transcribe: gr.Button(interactive=False),
264
+ stop_transcribe: gr.Button(interactive=False),
265
+ chunk_slider: gr.Slider(interactive=True),
266
+ main_audio: gr.Audio(visible=True),
 
 
 
267
  progress_bar: gr.Slider(value=0, visible=False),
268
  progress_text: gr.Textbox(value="00:00:00", visible=False),
269
  }
270
 
271
+ # --- Streaming event ---
 
272
  webrtc_stream.stream(
273
  fn=read_and_stream_audio,
274
+ inputs=[active_filepath, session_id, chunk_slider],
275
  outputs=[webrtc_stream],
276
  trigger=start_button.click,
 
 
277
  )
278
 
279
+ start_button.click(fn=start_streaming_ui, inputs=[session_id], outputs=[
280
+ start_button, stop_button, start_transcribe, stop_transcribe,
281
+ chunk_slider, main_audio, progress_bar, progress_text,
282
+ ])
283
+
284
+ stop_button.click(fn=stop_streaming, inputs=[session_id], outputs=[webrtc_stream]).then(
285
+ fn=stop_streaming_ui,
286
+ inputs=[session_id],
287
+ outputs=[
288
+ start_button, stop_button, start_transcribe, stop_transcribe,
289
+ chunk_slider, main_audio, progress_bar, progress_text,
290
+ ],
291
+ )
292
+
293
+ # --- Transcription control logic ---
294
+ def start_transcribe_ui(session_id: str):
295
+ """Create transcription flag and update UI."""
296
+ start_flag = os.path.join(TMP_DIR, f"transcribe_active_{session_id}.txt")
297
+ with open(start_flag, "w") as f:
298
+ f.write("1")
299
+ logging.info(f"[{session_id}] Transcription started.")
300
+ return {
301
+ start_transcribe: gr.Button(interactive=False),
302
+ stop_transcribe: gr.Button(interactive=True),
303
+ progress_text: gr.Textbox(value="πŸŽ™οΈ Transcription started..."),
304
+ }
305
+
306
+ def stop_transcribe_ui(session_id: str):
307
+ """Stop transcription by removing flag and update UI."""
308
+ flag_path = os.path.join(TMP_DIR, f"transcribe_active_{session_id}.txt")
309
+ if os.path.exists(flag_path):
310
+ os.remove(flag_path)
311
+ logging.info(f"[{session_id}] Transcription stopped.")
312
+ return {
313
+ start_transcribe: gr.Button(interactive=True),
314
+ stop_transcribe: gr.Button(interactive=False),
315
+ progress_text: gr.Textbox(value="πŸ›‘ Transcription stopped."),
316
+ }
317
+
318
+ # --- UI binding ---
319
+ start_transcribe.click(
320
+ fn=start_transcribe_ui,
321
  inputs=[session_id],
322
+ outputs=[start_transcribe, stop_transcribe, progress_text],
323
  )
324
 
325
+ # πŸ”₯ Actual transcription loop launch
326
+ start_transcribe.click(
327
+ fn=transcribe,
328
  inputs=[session_id],
329
+ outputs=None,
330
+ )
331
+
332
+ stop_transcribe.click(
333
+ fn=stop_transcribe_ui,
334
  inputs=[session_id],
335
+ outputs=[start_transcribe, stop_transcribe, progress_text],
336
  )
337
 
338
+ # --- Active sessions ---
339
  with gr.Accordion("πŸ“Š Active Sessions", open=False):
340
  sessions_table = gr.DataFrame(
341
  headers=["session_id", "file", "start_time", "status"],
342
  interactive=False,
343
  wrap=True,
 
344
  max_height=200,
345
  )
346
 
347
+ gr.Timer(3.0).tick(fn=get_active_sessions, outputs=sessions_table)
348
+ gr.Timer(1.0).tick(fn=get_session_progress, inputs=[session_id], outputs=[progress_bar, progress_text])
 
 
 
349
 
350
+ # --------------------------------------------------------
351
+ # CSS
352
+ # --------------------------------------------------------
353
  custom_css = """
354
  #column_source {
355
  display: flex;
 
367
  """
368
  demo.css = custom_css
369
 
370
+
371
+ # --------------------------------------------------------
372
+ # MAIN
373
+ # --------------------------------------------------------
374
  if __name__ == "__main__":
375
  demo.queue(max_size=50, api_open=False).launch(show_api=False, debug=True)
app/session_utils.py CHANGED
@@ -5,6 +5,8 @@ from datetime import datetime
5
  from app.logger_config import logger as logging
6
 
7
  TMP_DIR = "/tmp/canary_aed_streaming"
 
 
8
  ACTIVE_SESSIONS_FILE = os.path.join(TMP_DIR, "active_sessions.json")
9
 
10
 
@@ -45,6 +47,24 @@ def reset_active_sessions():
45
  logging.debug(f"Removed leftover stop flag file: {f}")
46
  except Exception as e:
47
  logging.warning(f"Failed to remove stop flag file {f}: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
 
49
  except Exception as e:
50
  logging.error(f"Error resetting active sessions: {e}")
 
5
  from app.logger_config import logger as logging
6
 
7
  TMP_DIR = "/tmp/canary_aed_streaming"
8
+ # TMP_DIR = "/home/sifar-dev/workspace/canary_aed_streaming/tmp/canary_aed_streaming"
9
+
10
  ACTIVE_SESSIONS_FILE = os.path.join(TMP_DIR, "active_sessions.json")
11
 
12
 
 
47
  logging.debug(f"Removed leftover stop flag file: {f}")
48
  except Exception as e:
49
  logging.warning(f"Failed to remove stop flag file {f}: {e}")
50
+ # Clean up old transcribe_stop_flag
51
+ for f in os.listdir(TMP_DIR):
52
+ if f.startswith("transcribe_stop_flag_") and f.endswith(".txt"):
53
+ try:
54
+ os.remove(os.path.join(TMP_DIR, f))
55
+ logging.debug(f"Removed leftover transcribe_stop_flag flag file: {f}")
56
+ except Exception as e:
57
+ logging.warning(f"Failed to remove transcribe_stop_flag file {f}: {e}")
58
+ # Clean up old transcribe_active_flag
59
+ for f in os.listdir(TMP_DIR):
60
+ if f.startswith("transcribe_active_") and f.endswith(".txt"):
61
+ try:
62
+ os.remove(os.path.join(TMP_DIR, f))
63
+ logging.debug(f"Removed leftover transcribe active flag file: {f}")
64
+ except Exception as e:
65
+ logging.warning(f"Failed to remove transcribe active file {f}: {e}")
66
+
67
+
68
 
69
  except Exception as e:
70
  logging.error(f"Error resetting active sessions: {e}")