sofieff committed
Commit 68f254f · 1 Parent(s): 288d9b6

fix app.py

Files changed (1): app.py +31 -31
app.py CHANGED
@@ -28,14 +28,14 @@ app_state = {
     'ch_names': None
 }
 
-sound_manager = None
+sound_control = None
 data_processor = None
 classifier = None
 
 def lazy_init():
-    global sound_manager, data_processor, classifier
-    if sound_manager is None:
-        sound_manager = SoundManager()
+    global sound_control, data_processor, classifier
+    if sound_control is None:
+        sound_control = SoundManager()
     if data_processor is None:
         data_processor = EEGDataProcessor()
     if classifier is None:
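Note: the likely point of this rename is that the old global shared its name with the sound_manager module that app.py imports from, which invites accidental shadowing. A minimal sketch of that hazard, using stand-in names rather than the real modules:

    import types

    # Stand-in for the imported module; not the real sound_manager package.
    sound_manager = types.ModuleType("sound_manager")
    sound_manager.AudioEffectsProcessor = type("AudioEffectsProcessor", (), {})

    class SoundManager:
        pass

    # lazy_init-style rebinding hides the module behind the instance...
    sound_manager = SoundManager()

    # ...so module attributes become unreachable through that name.
    assert not hasattr(sound_manager, "AudioEffectsProcessor")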
@@ -63,18 +63,18 @@ def get_movement_sounds() -> Dict[str, str]:
     if not hasattr(get_movement_sounds, 'play_counter'):
         get_movement_sounds.play_counter = {m: 0 for m in ['left_hand', 'right_hand', 'left_leg', 'right_leg']}
         get_movement_sounds.total_calls = 0
-    from sound_manager import AudioEffectsProcessor
+    from sound_control import AudioEffectsProcessor
     import tempfile
     import soundfile as sf
     # If in DJ mode, use effect-processed file if effect is ON
-    dj_mode = getattr(sound_manager, 'current_phase', None) == 'dj_effects'
-    for movement, sound_file in sound_manager.current_sound_mapping.items():
+    dj_mode = getattr(sound_control, 'current_phase', None) == 'dj_effects'
+    for movement, sound_file in sound_control.current_sound_mapping.items():
         if movement in ['left_hand', 'right_hand', 'left_leg', 'right_leg']:
             if sound_file is not None:
-                sound_path = sound_manager.sound_dir / sound_file
+                sound_path = sound_control.sound_dir / sound_file
                 if sound_path.exists():
                     # Sticky effect for all movements: if effect was ON, keep returning processed audio until next ON
-                    effect_on = dj_mode and sound_manager.active_effects.get(movement, False)
+                    effect_on = dj_mode and sound_control.active_effects.get(movement, False)
                     # If effect just turned ON, update sticky state
                     if effect_on:
                         get_movement_sounds.last_effect_state[movement] = True
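Note: get_movement_sounds keeps its counters (play_counter, total_calls) and the sticky last_effect_state as attributes on the function object itself. A tiny self-contained sketch of that pattern:

    def get_sounds():
        # Function attributes behave like static locals: created on the
        # first call, preserved across later calls.
        if not hasattr(get_sounds, "total_calls"):
            get_sounds.total_calls = 0
        get_sounds.total_calls += 1
        return get_sounds.total_calls

    assert get_sounds() == 1
    assert get_sounds() == 2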
@@ -102,7 +102,7 @@ def get_movement_sounds() -> Dict[str, str]:
                     if effect_on:
                         # Apply effect
                         processed = AudioEffectsProcessor.process_layer_with_effects(
-                            data, sr, movement, sound_manager.active_effects
+                            data, sr, movement, sound_control.active_effects
                         )
                         # Save to temp file (persistent for this effect state)
                         tmp = tempfile.NamedTemporaryFile(delete=False, suffix=f'_{movement}_effect.wav')
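Note: the processed layer is persisted via tempfile.NamedTemporaryFile(delete=False) plus soundfile, so the file outlives the call and the audio widget can keep streaming it. A runnable sketch with a placeholder signal (one second of silence at an assumed 22050 Hz):

    import tempfile

    import numpy as np
    import soundfile as sf

    def write_temp_wav(data, sr, movement):
        # delete=False keeps the WAV on disk after close so a player can
        # stream it later; cleanup is the caller's responsibility.
        tmp = tempfile.NamedTemporaryFile(delete=False, suffix=f"_{movement}_effect.wav")
        tmp.close()  # close first so writing by name works on all platforms
        sf.write(tmp.name, data, sr)
        return tmp.name

    path = write_temp_wav(np.zeros(22050, dtype="float32"), 22050, "left_hand")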
@@ -166,11 +166,11 @@ def start_composition():
     lazy_init()
     if not app_state['composition_active']:
         app_state['composition_active'] = True
-        sound_manager.start_new_cycle()
+        sound_control.start_new_cycle()
     if app_state['demo_data'] is None:
         return "❌ No data", "❌ No data", "❌ No data", None, None, None, None, None, None, "No EEG data available"
     # Force first trial to always be left_hand/instrumental
-    if len(sound_manager.movements_completed) == 0:
+    if len(sound_control.movements_completed) == 0:
         next_movement = 'left_hand'
         left_hand_label = [k for k, v in classifier.class_names.items() if v == 'left_hand'][0]
         import numpy as np
@@ -182,20 +182,20 @@ def start_composition():
     else:
         epoch_data, true_label = data_processor.simulate_real_time_data(app_state['demo_data'], app_state['demo_labels'], mode="class_balanced")
         true_label_name = classifier.class_names[true_label]
-        next_movement = sound_manager.get_current_target_movement()
+        next_movement = sound_control.get_current_target_movement()
         if next_movement == "cycle_complete":
             return continue_dj_phase()
     predicted_class, confidence, probabilities = classifier.predict(epoch_data)
     predicted_name = classifier.class_names[predicted_class]
     # Only add sound if confidence > threshold, predicted == true label, and true label matches the prompt
     if confidence > CONFIDENCE_THRESHOLD and predicted_name == true_label_name:
-        result = sound_manager.process_classification(predicted_name, confidence, CONFIDENCE_THRESHOLD, force_add=True)
+        result = sound_control.process_classification(predicted_name, confidence, CONFIDENCE_THRESHOLD, force_add=True)
     else:
         result = {'sound_added': False}
     fig = create_eeg_plot(epoch_data, true_label_name, predicted_name, confidence, result['sound_added'], app_state.get('ch_names'))
     # Only play completed movement sounds (layered)
     sounds = get_movement_sounds()
-    completed_movements = sound_manager.movements_completed
+    completed_movements = sound_control.movements_completed
     # Assign audio paths only for completed movements
     left_hand_audio = sounds.get('left_hand') if 'left_hand' in completed_movements else None
     right_hand_audio = sounds.get('right_hand') if 'right_hand' in completed_movements else None
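Note: a stem is layered in only when the prediction is both confident and matches the prompt. The gate in isolation (the 0.7 threshold is illustrative; app.py uses CONFIDENCE_THRESHOLD):

    def should_add_layer(confidence, predicted, prompted, threshold=0.7):
        # Both conditions must hold: enough confidence, and the predicted
        # movement agreeing with the prompted one.
        return confidence > threshold and predicted == prompted

    assert should_add_layer(0.9, "left_hand", "left_hand")
    assert not should_add_layer(0.9, "left_hand", "right_hand")
    assert not should_add_layer(0.5, "left_hand", "left_hand")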
@@ -210,9 +210,9 @@ def start_composition():
     }
     movement_command_lines = []
     # Show 'Now Playing' for all completed movements (layers that are currently playing)
-    completed_movements = sound_manager.movements_completed
+    completed_movements = sound_control.movements_completed
     for movement in ["left_hand", "right_hand", "left_leg", "right_leg"]:
-        sound_file = sound_manager.current_sound_mapping.get(movement, "")
+        sound_file = sound_control.current_sound_mapping.get(movement, "")
         instrument_type = ""
         for key in ["bass", "drums", "instruments", "vocals"]:
             if key in sound_file.lower():
@@ -233,7 +233,7 @@ def start_composition():
     movement_command_text = "🎼 Composition Mode - Movement to Stems Mapping\n" + "\n".join(movement_command_lines)
     # 3. Next Trial: always prompt user
     next_trial_text = "Imagine next movement"
-    composition_info = sound_manager.get_composition_info()
+    composition_info = sound_control.get_composition_info()
     status_text = format_composition_summary(composition_info)
     return (
         movement_command_text,
@@ -274,7 +274,7 @@ def continue_dj_phase():
         # UI update: show which movement is expected
         # Always play all completed movement sounds (layered)
         sounds = get_movement_sounds()
-        completed_movements = sound_manager.movements_completed
+        completed_movements = sound_control.movements_completed
         left_hand_audio = sounds.get('left_hand') if 'left_hand' in completed_movements else None
         right_hand_audio = sounds.get('right_hand') if 'right_hand' in completed_movements else None
         left_leg_audio = sounds.get('left_leg') if 'left_leg' in completed_movements else None
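Note: the four *_audio assignments repeat the same per-movement gate; an equivalent, more compact form (placeholder paths, not from the repo) would be:

    MOVEMENTS = ["left_hand", "right_hand", "left_leg", "right_leg"]
    sounds = {"left_hand": "/tmp/left_hand.wav"}   # placeholder path
    completed_movements = {"left_hand"}

    # One comprehension instead of four near-identical assignments.
    audio = {m: sounds.get(m) if m in completed_movements else None
             for m in MOVEMENTS}
    assert audio["left_hand"] == "/tmp/left_hand.wav"
    assert audio["right_leg"] is None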
@@ -288,10 +288,10 @@ def continue_dj_phase():
         emoji_map = {"left_hand": "🫲", "right_hand": "🫱", "left_leg": "🦡", "right_leg": "🦡"}
         movement_command_lines = []
         for m in ["left_hand", "right_hand", "left_leg", "right_leg"]:
-            status = "ON" if sound_manager.active_effects.get(m, False) else "off"
+            status = "ON" if sound_control.active_effects.get(m, False) else "off"
             movement_command_lines.append(f"{emoji_map[m]} {m.replace('_', ' ').title()}: {movement_map[m]['effect']} [{'ON' if status == 'ON' else 'off'}] → {movement_map[m]['instrument']}")
         target_text = "🎧 DJ Mode - Movement to Effect Mapping\n" + "\n".join(movement_command_lines)
-        composition_info = sound_manager.get_composition_info()
+        composition_info = sound_control.get_composition_info()
         status_text = format_composition_summary(composition_info)
         fig = create_eeg_plot(epoch_data, classifier.class_names[true_label], predicted_name, confidence, False, app_state.get('ch_names'))
         return (
@@ -309,7 +309,7 @@ def continue_dj_phase():
     # If correct movement, apply effect and advance order
     effect_applied = False
     if confidence > CONFIDENCE_THRESHOLD and predicted_name == continue_dj_phase.dj_order[continue_dj_phase.dj_index]:
-        result = sound_manager.toggle_dj_effect(predicted_name, brief=True, duration=1.0)
+        result = sound_control.toggle_dj_effect(predicted_name, brief=True, duration=1.0)
         effect_applied = result.get("effect_applied", False)
         continue_dj_phase.dj_index += 1
     else:
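Note: DJ mode steps through a fixed expected order kept in the function attributes dj_order / dj_index, advancing only on a confident, correct prediction. A standalone sketch of that advance rule (threshold illustrative):

    DJ_ORDER = ["left_hand", "right_hand", "left_leg", "right_leg"]

    def advance(index, predicted, confidence, threshold=0.7):
        # Advance to the next expected movement only on a confident match;
        # otherwise stay put, as continue_dj_phase does with dj_index.
        if confidence > threshold and predicted == DJ_ORDER[index]:
            return index + 1, True
        return index, False

    assert advance(0, "left_hand", 0.9) == (1, True)
    assert advance(1, "left_leg", 0.9) == (1, False)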
@@ -317,7 +317,7 @@ def continue_dj_phase():
     fig = create_eeg_plot(epoch_data, classifier.class_names[true_label], predicted_name, confidence, effect_applied, app_state.get('ch_names'))
     # Always play all completed movement sounds (layered)
     sounds = get_movement_sounds()
-    completed_movements = sound_manager.movements_completed
+    completed_movements = sound_control.movements_completed
     left_hand_audio = sounds.get('left_hand') if 'left_hand' in completed_movements else None
     right_hand_audio = sounds.get('right_hand') if 'right_hand' in completed_movements else None
     left_leg_audio = sounds.get('left_leg') if 'left_leg' in completed_movements else None
@@ -330,16 +330,16 @@ def continue_dj_phase():
         "right_leg": {"effect": "Echo", "instrument": "Vocals"},
     }
     emoji_map = {"left_hand": "🫲", "right_hand": "🫱", "left_leg": "🦡", "right_leg": "🦡"}
-    # Get effect ON/OFF status from sound_manager.active_effects
+    # Get effect ON/OFF status from sound_control.active_effects
     movement_command_lines = []
     for m in ["left_hand", "right_hand", "left_leg", "right_leg"]:
         # Show [ON] only if effect is currently active (True), otherwise [off]
-        status = "ON" if sound_manager.active_effects.get(m, False) else "off"
+        status = "ON" if sound_control.active_effects.get(m, False) else "off"
         movement_command_lines.append(f"{emoji_map[m]} {m.replace('_', ' ').title()}: {movement_map[m]['effect']} [{'ON' if status == 'ON' else 'off'}] → {movement_map[m]['instrument']}")
     target_text = "🎧 DJ Mode - Movement to Effect Mapping\n" + "\n".join(movement_command_lines)
     # In DJ mode, Next Trial should only show the prompt, not the predicted/target movement
     predicted_text = "Imagine next movement"
-    composition_info = sound_manager.get_composition_info()
+    composition_info = sound_control.get_composition_info()
     status_text = format_composition_summary(composition_info)
     # Ensure exactly 10 outputs: [textbox, textbox, plot, audio, audio, audio, audio, textbox, timer, button]
     # Use fig for the plot, and fill all outputs with correct types
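Note: status is already either "ON" or "off", so the nested conditional inside the f-string is redundant; status can be interpolated directly. A simplified equivalent, with illustrative map entries:

    movement_map = {"left_hand": {"effect": "Reverb", "instrument": "Bass"}}  # illustrative
    emoji_map = {"left_hand": "🫲"}
    active_effects = {"left_hand": True}

    m = "left_hand"
    status = "ON" if active_effects.get(m, False) else "off"
    # status is already the display string, so interpolate it as-is.
    line = (f"{emoji_map[m]} {m.replace('_', ' ').title()}: "
            f"{movement_map[m]['effect']} [{status}] → {movement_map[m]['instrument']}")
    assert line == "🫲 Left Hand: Reverb [ON] → Bass"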
@@ -471,11 +471,11 @@ def create_interface():
         ''' Stop composing and reset state (works in both building and DJ mode). '''
         timer_counter["count"] = 0
         app_state['composition_active'] = False  # Ensure new cycle on next start
-        # Reset sound_manager state for new session
-        sound_manager.current_phase = "building"
-        sound_manager.composition_layers = {}
-        sound_manager.movements_completed = set()
-        sound_manager.active_effects = {m: False for m in ["left_hand", "right_hand", "left_leg", "right_leg"]}
+        # Reset sound_control state for new session
+        sound_control.current_phase = "building"
+        sound_control.composition_layers = {}
+        sound_control.movements_completed = set()
+        sound_control.active_effects = {m: False for m in ["left_hand", "right_hand", "left_leg", "right_leg"]}
         # Clear static audio cache in get_movement_sounds
         if hasattr(get_movement_sounds, 'audio_cache'):
             for m in get_movement_sounds.audio_cache:
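Note: the stop handler resets four manager fields by hand; a possible follow-up (hypothetical, not part of this commit) is to fold them into a SoundManager.reset() method:

    MOVEMENTS = ["left_hand", "right_hand", "left_leg", "right_leg"]

    class SoundManager:
        def __init__(self):
            self.reset()

        def reset(self):
            # Same fields the stop handler clears, gathered in one place.
            self.current_phase = "building"
            self.composition_layers = {}
            self.movements_completed = set()
            self.active_effects = {m: False for m in MOVEMENTS}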
 