Spaces:
Sleeping
Sleeping
Refactor app.py for lazy loading to fix Space timeout
Browse files
app.py
CHANGED
|
@@ -24,22 +24,32 @@ app_state = {
|
|
| 24 |
'demo_data': None,
|
| 25 |
'demo_labels': None,
|
| 26 |
'composition_active': False,
|
| 27 |
-
'auto_mode': False
|
|
|
|
| 28 |
}
|
| 29 |
|
| 30 |
-
sound_manager =
|
| 31 |
-
data_processor =
|
| 32 |
-
classifier =
|
| 33 |
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
if
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
|
| 44 |
# --- Helper Functions ---
|
| 45 |
def get_movement_sounds() -> Dict[str, str]:
|
|
@@ -153,6 +163,7 @@ def start_composition():
|
|
| 153 |
Start the composition process.
|
| 154 |
'''
|
| 155 |
global app_state
|
|
|
|
| 156 |
if not app_state['composition_active']:
|
| 157 |
app_state['composition_active'] = True
|
| 158 |
sound_manager.start_new_cycle()
|
|
@@ -182,17 +193,14 @@ def start_composition():
|
|
| 182 |
else:
|
| 183 |
result = {'sound_added': False}
|
| 184 |
fig = create_eeg_plot(epoch_data, true_label_name, predicted_name, confidence, result['sound_added'], app_state.get('ch_names'))
|
| 185 |
-
|
| 186 |
# Only play completed movement sounds (layered)
|
| 187 |
sounds = get_movement_sounds()
|
| 188 |
completed_movements = sound_manager.movements_completed
|
| 189 |
-
|
| 190 |
# Assign audio paths only for completed movements
|
| 191 |
left_hand_audio = sounds.get('left_hand') if 'left_hand' in completed_movements else None
|
| 192 |
right_hand_audio = sounds.get('right_hand') if 'right_hand' in completed_movements else None
|
| 193 |
left_leg_audio = sounds.get('left_leg') if 'left_leg' in completed_movements else None
|
| 194 |
right_leg_audio = sounds.get('right_leg') if 'right_leg' in completed_movements else None
|
| 195 |
-
|
| 196 |
# 2. Movement Commands: show mapping for all movements
|
| 197 |
movement_emojis = {
|
| 198 |
"left_hand": "🫲",
|
|
@@ -200,7 +208,6 @@ def start_composition():
|
|
| 200 |
"left_leg": "🦵",
|
| 201 |
"right_leg": "🦵",
|
| 202 |
}
|
| 203 |
-
|
| 204 |
movement_command_lines = []
|
| 205 |
# Show 'Now Playing' for all completed movements (layers that are currently playing)
|
| 206 |
completed_movements = sound_manager.movements_completed
|
|
@@ -224,10 +231,8 @@ def start_composition():
|
|
| 224 |
else:
|
| 225 |
movement_command_lines.append(f"{emoji} {pretty_movement}: {pretty_instrument}")
|
| 226 |
movement_command_text = "🎼 Composition Mode - Movement to Stems Mapping\n" + "\n".join(movement_command_lines)
|
| 227 |
-
|
| 228 |
# 3. Next Trial: always prompt user
|
| 229 |
next_trial_text = "Imagine next movement"
|
| 230 |
-
|
| 231 |
composition_info = sound_manager.get_composition_info()
|
| 232 |
status_text = format_composition_summary(composition_info)
|
| 233 |
return (
|
|
@@ -578,4 +583,4 @@ if __name__ == "__main__":
|
|
| 578 |
demo = create_interface()
|
| 579 |
demo.launch(server_name="0.0.0.0", server_port=7867)
|
| 580 |
|
| 581 |
-
|
|
|
|
| 24 |
'demo_data': None,
|
| 25 |
'demo_labels': None,
|
| 26 |
'composition_active': False,
|
| 27 |
+
'auto_mode': False,
|
| 28 |
+
'ch_names': None
|
| 29 |
}
|
| 30 |
|
| 31 |
+
# Heavyweight components are deliberately NOT constructed at import time:
# they are created on first use by lazy_init() so the Space starts fast
# enough to avoid the platform's startup timeout.
sound_manager = None
data_processor = None
classifier = None
|
| 34 |
|
| 35 |
+
def lazy_init():
    """Create heavyweight components on first use instead of at import time.

    Hosted Spaces terminate apps whose startup takes too long, so the sound
    manager, EEG data processor, classifier, demo data, and model weights are
    all built/loaded lazily on the first request that needs them.

    Safe to call repeatedly: each initialization step is skipped once done.
    Mutates the module-level globals ``sound_manager``, ``data_processor``,
    ``classifier`` and the ``app_state`` dict in place.
    """
    global sound_manager, data_processor, classifier
    if sound_manager is None:
        sound_manager = SoundManager()
    if data_processor is None:
        data_processor = EEGDataProcessor()
    if classifier is None:
        classifier = MotorImageryClassifier()

    # (Re)load demo data whenever any piece of it is missing.
    if (app_state['demo_data'] is None
            or app_state['demo_labels'] is None
            or app_state['ch_names'] is None):
        existing_files = [f for f in DEMO_DATA_PATHS if os.path.exists(f)]
        if existing_files:
            (app_state['demo_data'],
             app_state['demo_labels'],
             app_state['ch_names']) = data_processor.process_files(existing_files)
        else:
            # No demo files present: leave everything unset so a later call
            # retries once files appear.
            app_state['demo_data'] = None
            app_state['demo_labels'] = None
            app_state['ch_names'] = None

    # Load model weights once demo data is available. This check is hoisted
    # out of the data-load branch above (the original nested it there), so the
    # model still gets loaded on a later call even if the data was already
    # populated previously. The redundant `classifier is not None` test is
    # dropped — the block above guarantees it. `_model_loaded` is a sentinel
    # attribute marking that the one-time weight load has happened.
    if app_state['demo_data'] is not None and not hasattr(classifier, '_model_loaded'):
        classifier.load_model(n_chans=app_state['demo_data'].shape[1],
                              n_times=app_state['demo_data'].shape[2])
        classifier._model_loaded = True
|
| 53 |
|
| 54 |
# --- Helper Functions ---
|
| 55 |
def get_movement_sounds() -> Dict[str, str]:
|
|
|
|
| 163 |
Start the composition process.
|
| 164 |
'''
|
| 165 |
global app_state
|
| 166 |
+
lazy_init()
|
| 167 |
if not app_state['composition_active']:
|
| 168 |
app_state['composition_active'] = True
|
| 169 |
sound_manager.start_new_cycle()
|
|
|
|
| 193 |
else:
|
| 194 |
result = {'sound_added': False}
|
| 195 |
fig = create_eeg_plot(epoch_data, true_label_name, predicted_name, confidence, result['sound_added'], app_state.get('ch_names'))
|
|
|
|
| 196 |
# Only play completed movement sounds (layered)
|
| 197 |
sounds = get_movement_sounds()
|
| 198 |
completed_movements = sound_manager.movements_completed
|
|
|
|
| 199 |
# Assign audio paths only for completed movements
|
| 200 |
left_hand_audio = sounds.get('left_hand') if 'left_hand' in completed_movements else None
|
| 201 |
right_hand_audio = sounds.get('right_hand') if 'right_hand' in completed_movements else None
|
| 202 |
left_leg_audio = sounds.get('left_leg') if 'left_leg' in completed_movements else None
|
| 203 |
right_leg_audio = sounds.get('right_leg') if 'right_leg' in completed_movements else None
|
|
|
|
| 204 |
# 2. Movement Commands: show mapping for all movements
|
| 205 |
movement_emojis = {
|
| 206 |
"left_hand": "🫲",
|
|
|
|
| 208 |
"left_leg": "🦵",
|
| 209 |
"right_leg": "🦵",
|
| 210 |
}
|
|
|
|
| 211 |
movement_command_lines = []
|
| 212 |
# Show 'Now Playing' for all completed movements (layers that are currently playing)
|
| 213 |
completed_movements = sound_manager.movements_completed
|
|
|
|
| 231 |
else:
|
| 232 |
movement_command_lines.append(f"{emoji} {pretty_movement}: {pretty_instrument}")
|
| 233 |
movement_command_text = "🎼 Composition Mode - Movement to Stems Mapping\n" + "\n".join(movement_command_lines)
|
|
|
|
| 234 |
# 3. Next Trial: always prompt user
|
| 235 |
next_trial_text = "Imagine next movement"
|
|
|
|
| 236 |
composition_info = sound_manager.get_composition_info()
|
| 237 |
status_text = format_composition_summary(composition_info)
|
| 238 |
return (
|
|
|
|
| 583 |
demo = create_interface()
|
| 584 |
demo.launch(server_name="0.0.0.0", server_port=7867)
|
| 585 |
|
| 586 |
+
|