sofieff committed
Commit 66947ed · 1 Parent(s): b906dc7

ready for deploy

Files changed (11)
  1. .gitignore +5 -36
  2. app.log +0 -452
  3. app.py +187 -69
  4. classifier.py +4 -3
  5. config.py +2 -70
  6. data_processor.py +8 -38
  7. demo.py +0 -96
  8. enhanced_utils.py +0 -236
  9. requirements.txt +11 -14
  10. sound_manager.py +36 -18
  11. utils.py +0 -226
.gitignore CHANGED
@@ -1,31 +1,10 @@
 # Python
 __pycache__/
 *.py[cod]
-*$py.class
 *.so
-.Python
-env/
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
 
 # Virtual Environment
 .venv/
-venv/
-ENV/
-env/
 
 # IDE
 .vscode/
@@ -36,26 +15,16 @@ env/
 
 # OS
 .DS_Store
-.DS_Store?
-._*
-.Spotlight-V100
-.Trashes
-ehthumbs.db
-Thumbs.db
 
 # Project specific
 *.wav
 *.mp3
 *.pth
-
-
-# Data files
-data/
 *.mat
-
-otherfiles/
-
 app.log
 
-source/
-sounds/
+# Data and generated folders
+data/
+sounds/
+otherfiles/
+source/
app.log DELETED
@@ -1,452 +0,0 @@
-🧠 EEG Motor Imagery Music Composer
-==================================================
-Starting Gradio application...
-🎯 New random sequence (5 movements): Left Leg → Right Leg → Right Hand → Left Hand → Tongue
-Loaded sound for left_hand: 1_SoundHelix-Song-6_(Bass).wav
-Loaded sound for right_hand: 1_SoundHelix-Song-6_(Drums).wav
-Loaded sound for left_leg: 1_SoundHelix-Song-6_(Other).wav
-Loaded sound for tongue: 1_SoundHelix-Song-6_(Vocals).wav
-Loaded sound for right_leg: 1_SoundHelix-Song-6_(Bass).wav
-Dropped auxiliary channels [np.str_('X5')]. Remaining channels: 21
-Creating RawArray with float64 data, n_channels=21, n_times=667600
-Range : 0 ... 667599 = 0.000 ... 3337.995 secs
-Ready.
-Not setting metadata
-959 matching events found
-No baseline correction applied
-0 projection items activated
-Using data from preloaded Raw for 959 events and 301 original time points ...
-0 bad epochs dropped
-Dropped auxiliary channels [np.str_('X5')]. Remaining channels: 21
-Creating RawArray with float64 data, n_channels=21, n_times=681400
-Range : 0 ... 681399 = 0.000 ... 3406.995 secs
-Ready.
-Not setting metadata
-958 matching events found
-No baseline correction applied
-0 projection items activated
-Using data from preloaded Raw for 958 events and 301 original time points ...
-0 bad epochs dropped
-Dropped auxiliary channels [np.str_('X5')]. Remaining channels: 21
-Creating RawArray with float64 data, n_channels=21, n_times=813600
-Range : 0 ... 813599 = 0.000 ... 4067.995 secs
-Ready.
-Not setting metadata
-960 matching events found
-No baseline correction applied
-0 projection items activated
-Using data from preloaded Raw for 960 events and 301 original time points ...
-0 bad epochs dropped
-Not setting metadata
-2877 matching events found
-No baseline correction applied
-✅ Pre-trained model loaded successfully from shallow_weights_all.pth
-✅ Pre-trained model loaded successfully from shallow_weights_all.pth
-Pre-trained Demo: 2877 samples from 3 subjects
-Available sound classes: ['left_hand', 'right_hand', 'neutral', 'left_leg', 'tongue', 'right_leg']
-* Running on local URL: http://0.0.0.0:7867
-* To create a public link, set `share=True` in `launch()`.
-🎯 New random sequence (5 movements): Right Leg → Tongue → Right Hand → Left Hand → Left Leg
-🔄 Starting Cycle 2
-💪 Let's create your first brain-music composition!
-DEBUG start_automatic_composition: current target = right_leg
-DEBUG: Added layer 1, total layers now: 1
-DEBUG: Composition layers: ['left_leg']
-DEBUG: Current composition has 1 layers: ['left_leg']
-DEBUG: Audio files to mix: ['1_SoundHelix-Song-6_(Other).wav']
-DEBUG: Using base audio file as fallback: 1_SoundHelix-Song-6_(Vocals).wav (FILE SAVING DISABLED)
-✓ Cycle 2: Added 1_SoundHelix-Song-6_(Other).wav for left_leg (1/5 complete)
[log lines 59–113: the same per-layer DEBUG pattern repeats as layers 2–5 (left_hand, right_hand, right_leg, tongue) are classified and added]
-🎵 Cycle 2 complete! All 5 movements successfully classified!
-📀 Using existing audio as mixed composition: /Users/sofiafregni/Downloads/portfolio_ML_project/Gradio/sounds/1_SoundHelix-Song-6_(Bass).wav (FILE SAVING DISABLED)
-🎵 Composition Complete! Transitioning to DJ Effects Phase...
-🎧 You are now the DJ! Use movements to control effects:
-👈 Left Hand: Volume Fade
-👉 Right Hand: High Pass Filter
-🦵 Left Leg: Reverb Effect
-🦵 Right Leg: Low Pass Filter
-👅 Tongue: Bass Boost
-DEBUG: Successfully transitioned to DJ phase with 5 completed movements
-DEBUG continue: DJ mode - NEW mixed composition loaded: /Users/sofiafregni/Downloads/portfolio_ML_project/Gradio/sounds/1_SoundHelix-Song-6_(Bass).wav
[log lines 125–452: repeated DJ-mode entries toggling Volume Fade, High Pass Filter, Reverb Effect, Low Pass Filter, and Bass Boost on and off ("🎛️ Audio processed with effects: … (FILE SAVING DISABLED)"), plus one matplotlib RuntimeWarning from app.py:729 about more than 20 open pyplot figures]
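The RuntimeWarning captured in the deleted log comes from figures never being closed after rendering. A minimal sketch of the usual fix, with a stand-in plotting function rather than the app's actual code:

```python
import matplotlib
matplotlib.use("Agg")  # headless backend, as in a server app
import matplotlib.pyplot as plt

def render_eeg_figure(data):
    # Stand-in for the app's plotting code: one subplot per channel
    fig, axes = plt.subplots(1, 2, figsize=(10, 4))
    for ax, channel in zip(axes, data):
        ax.plot(channel)
    return fig

# Once the figure has been consumed (e.g. handed to Gradio), close it so
# matplotlib does not accumulate figures past figure.max_open_warning:
fig = render_eeg_figure([[0, 1, 0], [1, 0, 1]])
plt.close(fig)
```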
app.py CHANGED
@@ -34,9 +34,9 @@ classifier = MotorImageryClassifier()
 # Load demo data
 existing_files = [f for f in DEMO_DATA_PATHS if os.path.exists(f)]
 if existing_files:
-    app_state['demo_data'], app_state['demo_labels'] = data_processor.process_files(existing_files)
+    app_state['demo_data'], app_state['demo_labels'], app_state['ch_names'] = data_processor.process_files(existing_files)
 else:
-    app_state['demo_data'], app_state['demo_labels'] = None, None
+    app_state['demo_data'], app_state['demo_labels'], app_state['ch_names'] = None, None, None
 
 if app_state['demo_data'] is not None:
     classifier.load_model(n_chans=app_state['demo_data'].shape[1], n_times=app_state['demo_data'].shape[2])
@@ -45,6 +45,14 @@ if app_state['demo_data'] is not None:
 def get_movement_sounds() -> Dict[str, str]:
     """Get the current sound files for each movement."""
     sounds = {}
+    # Add a static cache for audio file paths per movement and effect state
+    if not hasattr(get_movement_sounds, 'audio_cache'):
+        get_movement_sounds.audio_cache = {m: {False: None, True: None} for m in ['left_hand', 'right_hand', 'left_leg', 'right_leg']}
+        get_movement_sounds.last_effect_state = {m: None for m in ['left_hand', 'right_hand', 'left_leg', 'right_leg']}
+    # Add a static counter to track how many times each movement's audio is played
+    if not hasattr(get_movement_sounds, 'play_counter'):
+        get_movement_sounds.play_counter = {m: 0 for m in ['left_hand', 'right_hand', 'left_leg', 'right_leg']}
+        get_movement_sounds.total_calls = 0
     from sound_manager import AudioEffectsProcessor
     import tempfile
     import soundfile as sf
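The cache block added above stores state as attributes on the function object itself, a lightweight alternative to module-level globals. A minimal, self-contained sketch of the same memoization pattern (the loader below is hypothetical, not from this repo):

```python
def load_asset(name: str) -> str:
    # Function attributes act as per-process static storage, so the cache
    # survives across calls without a global variable.
    if not hasattr(load_asset, 'cache'):
        load_asset.cache = {}
    if name not in load_asset.cache:
        load_asset.cache[name] = f"loaded:{name}"  # placeholder for expensive work
    return load_asset.cache[name]

print(load_asset("bass.wav"))  # computed on first call
print(load_asset("bass.wav"))  # served from the cache thereafter
```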
@@ -55,33 +63,69 @@ def get_movement_sounds() -> Dict[str, str]:
         if sound_file is not None:
             sound_path = sound_manager.sound_dir / sound_file
             if sound_path.exists():
-                if dj_mode and sound_manager.active_effects.get(movement, False):
-                    # Load audio, apply effect, save to temp file
+                # Sticky effect for all movements: if effect was ON, keep returning processed audio until next ON
+                effect_on = dj_mode and sound_manager.active_effects.get(movement, False)
+                # If effect just turned ON, update sticky state
+                if effect_on:
+                    get_movement_sounds.last_effect_state[movement] = True
+                # If effect is OFF, but sticky is set, keep using processed audio
+                elif get_movement_sounds.last_effect_state[movement]:
+                    effect_on = True
+                else:
+                    effect_on = False
+                # Check cache for this movement/effect state
+                cached_path = get_movement_sounds.audio_cache[movement][effect_on]
+                # Only regenerate if cache is empty or effect state just changed
+                if cached_path is not None and get_movement_sounds.last_effect_state[movement] == effect_on:
+                    sounds[movement] = cached_path
+                else:
+                    # Load audio
                     data, sr = sf.read(str(sound_path))
                     if len(data.shape) > 1:
                         data = np.mean(data, axis=1)
-                    processed = AudioEffectsProcessor.process_layer_with_effects(
-                        data, sr, movement, sound_manager.active_effects
-                    )
-                    # Save to temp file
-                    tmp = tempfile.NamedTemporaryFile(delete=False, suffix='.wav')
-                    sf.write(tmp.name, processed, sr)
-                    print(f"DEBUG: Playing PROCESSED audio for {movement}: {tmp.name}")
-                    sounds[movement] = tmp.name
-                else:
-                    print(f"DEBUG: Playing ORIGINAL audio for {movement}: {sound_path.resolve()}")
-                    sounds[movement] = str(sound_path.resolve())
+                    # Fade-in: applied to all audio on restart (10 s fade for a gradual effect)
+                    fade_duration = 10  # seconds
+                    fade_samples = int(fade_duration * sr)
+                    if fade_samples > 0 and fade_samples < len(data):
+                        fade_curve = np.linspace(0, 1, fade_samples)
+                        data[:fade_samples] = data[:fade_samples] * fade_curve
+                    if effect_on:
+                        # Apply effect
+                        processed = AudioEffectsProcessor.process_layer_with_effects(
+                            data, sr, movement, sound_manager.active_effects
+                        )
+                        # Save to temp file (persistent for this effect state)
+                        tmp = tempfile.NamedTemporaryFile(delete=False, suffix=f'_{movement}_effect.wav')
+                        sf.write(tmp.name, processed, sr)
+                        print(f"DEBUG: Playing PROCESSED audio for {movement}: {tmp.name}")
+                        get_movement_sounds.audio_cache[movement][True] = tmp.name
+                        sounds[movement] = tmp.name
+                    else:
+                        print(f"DEBUG: Playing ORIGINAL audio for {movement}: {sound_path.resolve()}")
+                        get_movement_sounds.audio_cache[movement][False] = str(sound_path.resolve())
+                        sounds[movement] = str(sound_path.resolve())
+                    get_movement_sounds.last_effect_state[movement] = effect_on
+                get_movement_sounds.play_counter[movement] += 1
+                get_movement_sounds.total_calls += 1
+                # Print summary every 20 calls
+                if get_movement_sounds.total_calls % 20 == 0:
+                    print("AUDIO PLAY COUNTS (DJ mode):", dict(get_movement_sounds.play_counter))
     return sounds
 
-def create_eeg_plot(eeg_data: np.ndarray, target_movement: str, predicted_name: str, confidence: float, sound_added: bool) -> plt.Figure:
+def create_eeg_plot(eeg_data: np.ndarray, target_movement: str, predicted_name: str, confidence: float, sound_added: bool, ch_names=None) -> plt.Figure:
+    '''Create a plot of EEG data with annotations. Plots C3 and C4 channels by name.'''
+    if ch_names is None:
+        ch_names = ['C3', 'C4']
+    # Find indices for C3 and C4
+    idx_c3 = ch_names.index('C3') if 'C3' in ch_names else 0
+    idx_c4 = ch_names.index('C4') if 'C4' in ch_names else 1
     fig, axes = plt.subplots(1, 2, figsize=(10, 4))
     axes = axes.flatten()
     time_points = np.arange(eeg_data.shape[1]) / 200
-    channel_names = ['C3', 'C4']
-    for i in range(min(2, eeg_data.shape[0])):
+    for i, idx in enumerate([idx_c3, idx_c4]):
         color = 'green' if sound_added else 'blue'
-        axes[i].plot(time_points, eeg_data[i], color=color, linewidth=1)
-        axes[i].set_title(f'{channel_names[i] if i < len(channel_names) else f"Channel {i+1}"}')
+        axes[i].plot(time_points, eeg_data[idx], color=color, linewidth=1)
+        axes[i].set_title(f'{ch_names[idx] if idx < len(ch_names) else f"Channel {idx+1}"}')
         axes[i].set_xlabel('Time (s)')
         axes[i].set_ylabel('Amplitude (µV)')
         axes[i].grid(True, alpha=0.3)
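The fade-in above is a plain linear amplitude ramp over the first fade_duration seconds of the buffer. A standalone sketch of the same envelope on a synthetic tone (all values illustrative):

```python
import numpy as np

sr = 44100                      # sample rate in Hz
t = np.arange(0, 2.0, 1 / sr)   # two seconds of audio
tone = 0.5 * np.sin(2 * np.pi * 440 * t)

fade_duration = 0.5             # seconds of fade-in
fade_samples = int(fade_duration * sr)
if 0 < fade_samples < len(tone):
    # Linear ramp from silence to full level over the fade window
    tone[:fade_samples] *= np.linspace(0.0, 1.0, fade_samples)
```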
@@ -92,6 +136,8 @@ def create_eeg_plot(eeg_data: np.ndarray, target_movement: str, predicted_name:
     return fig
 
 def format_composition_summary(composition_info: Dict) -> str:
+    '''Format the composition summary for display.
+    '''
     if not composition_info.get('layers_by_cycle'):
         return "No composition layers yet"
     summary = []
@@ -141,7 +187,7 @@ def start_composition():
         result = sound_manager.process_classification(predicted_name, confidence, CONFIDENCE_THRESHOLD, force_add=True)
     else:
         result = {'sound_added': False}
-    fig = create_eeg_plot(epoch_data, true_label_name, predicted_name, confidence, result['sound_added'])
+    fig = create_eeg_plot(epoch_data, true_label_name, predicted_name, confidence, result['sound_added'], app_state.get('ch_names'))
 
     # Only play completed movement sounds (layered)
     sounds = get_movement_sounds()
@@ -153,24 +199,17 @@ def start_composition():
     left_leg_audio = sounds.get('left_leg') if 'left_leg' in completed_movements else None
     right_leg_audio = sounds.get('right_leg') if 'right_leg' in completed_movements else None
 
-    print("DEBUG: movement sound paths:", sounds)
-    print("DEBUG: completed movements:", completed_movements)
-    print("DEBUG: left_hand_audio:", left_hand_audio, "exists:", os.path.exists(left_hand_audio) if left_hand_audio else None)
-    print("DEBUG: right_hand_audio:", right_hand_audio, "exists:", os.path.exists(right_hand_audio) if right_hand_audio else None)
-    print("DEBUG: left_leg_audio:", left_leg_audio, "exists:", os.path.exists(left_leg_audio) if left_leg_audio else None)
-    print("DEBUG: right_leg_audio:", right_leg_audio, "exists:", os.path.exists(right_leg_audio) if right_leg_audio else None)
-
-
-
-
     # 2. Movement Commands: show mapping for all movements
     movement_emojis = {
-        "left_hand": "👈",
-        "right_hand": "👉",
+        "left_hand": "🫲",
+        "right_hand": "🫱",
         "left_leg": "🦵",
         "right_leg": "🦵",
     }
+
     movement_command_lines = []
+    # Show 'Now Playing' for all completed movements (layers that are currently playing)
+    completed_movements = sound_manager.movements_completed
     for movement in ["left_hand", "right_hand", "left_leg", "right_leg"]:
         sound_file = sound_manager.current_sound_mapping.get(movement, "")
         instrument_type = ""
@@ -179,13 +218,21 @@ def start_composition():
             instrument_type = key if key != "instruments" else "instrument"
             break
         pretty_movement = movement.replace("_", " ").title()
-        pretty_instrument = instrument_type.capitalize() if instrument_type else "--"
+        # Always use 'Instruments' (plural) for the left hand stem
+        if movement == "left_hand" and instrument_type.lower() == "instrument":
+            pretty_instrument = "Instruments"
+        else:
+            pretty_instrument = instrument_type.capitalize() if instrument_type else "--"
         emoji = movement_emojis.get(movement, "")
-        movement_command_lines.append(f"{emoji} {pretty_movement}: {pretty_instrument}")
-    movement_command_text = "🎼 Composition Mode - Movement to Layers Mapping\n" + "\n".join(movement_command_lines)
+        # Add 'Now Playing' indicator for all completed movements
+        if movement in completed_movements:
+            movement_command_lines.append(f"{emoji} {pretty_movement}: {pretty_instrument} ▶️ Now Playing")
+        else:
+            movement_command_lines.append(f"{emoji} {pretty_movement}: {pretty_instrument}")
+    movement_command_text = "🎼 Composition Mode - Movement to Stems Mapping\n" + "\n".join(movement_command_lines)
 
-    # 3. Next Trial: will be set dynamically in timer_tick
-    next_trial_text = ""
+    # 3. Next Trial: always prompt user
+    next_trial_text = "Imagine next movement"
 
     composition_info = sound_manager.get_composition_info()
     status_text = format_composition_summary(composition_info)
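For reference, the f-strings above render status lines of this shape (a sketch with made-up stem values, using the diff's own format strings):

```python
movement = "left_hand"
pretty_movement = movement.replace("_", " ").title()  # "Left Hand"
pretty_instrument = "Instruments"                     # illustrative stem name
line = f"🫲 {pretty_movement}: {pretty_instrument} ▶️ Now Playing"
print(line)  # 🫲 Left Hand: Instruments ▶️ Now Playing
```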
@@ -201,29 +248,75 @@ def start_composition():
     )
 
 def continue_dj_phase():
+    ''' Continue in DJ phase, applying effects and always playing all layered sounds.
+    '''
     global app_state
     print(f"DEBUG: [continue_dj_phase] Entered DJ mode. current_phase={sound_manager.current_phase}")
     if not app_state['composition_active']:
         return "❌ Not active", "❌ Not active", "❌ Not active", None, None, None, None, None, None, "Click 'Start Composing' first"
     if app_state['demo_data'] is None:
         return "❌ No data", "❌ No data", "❌ No data", None, None, None, None, None, None, "No EEG data available"
-    # DJ phase: classify and apply effects, but always play all layered sounds
+    # DJ phase: enforce strict DJ effect order
     epoch_data, true_label = data_processor.simulate_real_time_data(app_state['demo_data'], app_state['demo_labels'], mode="class_balanced")
     predicted_class, confidence, probabilities = classifier.predict(epoch_data)
     predicted_name = classifier.class_names[predicted_class]
-    # Toggle effect if confidence is high
-    if confidence > CONFIDENCE_THRESHOLD:
-        print(f"DEBUG: [continue_dj_phase] Toggling DJ effect for movement: {predicted_name}")
-        sound_manager.toggle_dj_effect(predicted_name, brief=True, duration=1.0)
-    true_label_name = classifier.class_names[true_label]
-    # Only turn plot green if effect is actually toggled (applied)
+    # Strict DJ order: right_hand, right_leg, left_leg, left_hand
+    if not hasattr(continue_dj_phase, 'dj_order'):
+        continue_dj_phase.dj_order = ["right_hand", "right_leg", "left_leg", "left_hand"]
+        continue_dj_phase.dj_index = 0
+    # Find the next movement in the DJ order that hasn't been toggled yet (using effect counters)
+    while continue_dj_phase.dj_index < 4:
+        next_movement = continue_dj_phase.dj_order[continue_dj_phase.dj_index]
+        # Only proceed if the predicted movement matches the next in order
+        if predicted_name == next_movement:
+            break
+        else:
+            # Ignore this prediction, do not apply effect
+            next_trial_text = "Imagine next movement"
+            # UI update: show which movement is expected
+            # Always play all completed movement sounds (layered)
+            sounds = get_movement_sounds()
+            completed_movements = sound_manager.movements_completed
+            left_hand_audio = sounds.get('left_hand') if 'left_hand' in completed_movements else None
+            right_hand_audio = sounds.get('right_hand') if 'right_hand' in completed_movements else None
+            left_leg_audio = sounds.get('left_leg') if 'left_leg' in completed_movements else None
+            right_leg_audio = sounds.get('right_leg') if 'right_leg' in completed_movements else None
+            movement_map = {
+                "left_hand": {"effect": "Fade In/Out", "instrument": "Instruments"},
+                "right_hand": {"effect": "Low Pass", "instrument": "Bass"},
+                "left_leg": {"effect": "Compressor", "instrument": "Drums"},
+                "right_leg": {"effect": "Echo", "instrument": "Vocals"},
+            }
+            emoji_map = {"left_hand": "🫲", "right_hand": "🫱", "left_leg": "🦵", "right_leg": "🦵"}
+            movement_command_lines = []
+            for m in ["left_hand", "right_hand", "left_leg", "right_leg"]:
+                status = "ON" if sound_manager.active_effects.get(m, False) else "off"
+                movement_command_lines.append(f"{emoji_map[m]} {m.replace('_', ' ').title()}: {movement_map[m]['effect']} [{'ON' if status == 'ON' else 'off'}] → {movement_map[m]['instrument']}")
+            target_text = "🎧 DJ Mode - Movement to Effect Mapping\n" + "\n".join(movement_command_lines)
+            composition_info = sound_manager.get_composition_info()
+            status_text = format_composition_summary(composition_info)
+            fig = create_eeg_plot(epoch_data, classifier.class_names[true_label], predicted_name, confidence, False, app_state.get('ch_names'))
+            return (
+                target_text,       # Movement Commands (textbox)
+                next_trial_text,   # Next Trial (textbox)
+                fig,               # EEG Plot (plot)
+                left_hand_audio,   # Left Hand (audio)
+                right_hand_audio,  # Right Hand (audio)
+                left_leg_audio,    # Left Leg (audio)
+                right_leg_audio,   # Right Leg (audio)
+                status_text,       # Composition Status (textbox)
+                gr.update(),       # Timer (update object)
+                gr.update()        # Continue DJ Button (update object)
+            )
+    # If correct movement, apply effect and advance order
     effect_applied = False
-    if confidence > CONFIDENCE_THRESHOLD:
+    if confidence > CONFIDENCE_THRESHOLD and predicted_name == continue_dj_phase.dj_order[continue_dj_phase.dj_index]:
         result = sound_manager.toggle_dj_effect(predicted_name, brief=True, duration=1.0)
        effect_applied = result.get("effect_applied", False)
+        continue_dj_phase.dj_index += 1
     else:
         result = None
-    fig = create_eeg_plot(epoch_data, true_label_name, predicted_name, confidence, effect_applied)
+    fig = create_eeg_plot(epoch_data, classifier.class_names[true_label], predicted_name, confidence, effect_applied, app_state.get('ch_names'))
     # Always play all completed movement sounds (layered)
     sounds = get_movement_sounds()
     completed_movements = sound_manager.movements_completed
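The sequencing added above keeps mutable state on the function object to unlock effects in a fixed order, ignoring out-of-order predictions. A stripped-down sketch of that gate (function name hypothetical):

```python
def advance_gate(predicted: str) -> bool:
    """Return True when `predicted` is the next expected step; ignore it otherwise."""
    if not hasattr(advance_gate, 'order'):
        advance_gate.order = ["right_hand", "right_leg", "left_leg", "left_hand"]
        advance_gate.index = 0
    if advance_gate.index < len(advance_gate.order) and predicted == advance_gate.order[advance_gate.index]:
        advance_gate.index += 1  # step accepted, move the gate forward
        return True
    return False

assert advance_gate("left_leg") is False   # out of order: ignored
assert advance_gate("right_hand") is True  # first expected step: accepted
```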
@@ -233,17 +326,18 @@ def continue_dj_phase():
     right_leg_audio = sounds.get('right_leg') if 'right_leg' in completed_movements else None
     # Show DJ effect mapping for each movement with ON/OFF status and correct instrument mapping
     movement_map = {
-        "left_hand": {"effect": "Echo", "instrument": "Instrument"},
+        "left_hand": {"effect": "Fade In/Out", "instrument": "Instruments"},
         "right_hand": {"effect": "Low Pass", "instrument": "Bass"},
         "left_leg": {"effect": "Compressor", "instrument": "Drums"},
-        "right_leg": {"effect": "High Pass", "instrument": "Vocals"},
+        "right_leg": {"effect": "Echo", "instrument": "Vocals"},
     }
-    emoji_map = {"left_hand": "👈", "right_hand": "👉", "left_leg": "🦵", "right_leg": "🦵"}
+    emoji_map = {"left_hand": "🫲", "right_hand": "🫱", "left_leg": "🦵", "right_leg": "🦵"}
     # Get effect ON/OFF status from sound_manager.active_effects
     movement_command_lines = []
     for m in ["left_hand", "right_hand", "left_leg", "right_leg"]:
+        # Show [ON] only if effect is currently active (True), otherwise [off]
         status = "ON" if sound_manager.active_effects.get(m, False) else "off"
-        movement_command_lines.append(f"{emoji_map[m]} {m.replace('_', ' ').title()}: {movement_map[m]['effect']} [{status}] → {movement_map[m]['instrument']}")
+        movement_command_lines.append(f"{emoji_map[m]} {m.replace('_', ' ').title()}: {movement_map[m]['effect']} [{'ON' if status == 'ON' else 'off'}] → {movement_map[m]['instrument']}")
     target_text = "🎧 DJ Mode - Movement to Effect Mapping\n" + "\n".join(movement_command_lines)
     # In DJ mode, Next Trial should only show the prompt, not the predicted/target movement
     predicted_text = "Imagine next movement"
@@ -266,23 +360,22 @@ def continue_dj_phase():
 
 # --- Gradio UI ---
 def create_interface():
+    ''' Create the Gradio interface.
+    '''
     with gr.Blocks(title="EEG Motor Imagery Music Composer", theme=gr.themes.Citrus()) as demo:
         with gr.Tabs():
-            with gr.TabItem("Automatic Music Composition"):
-                gr.Markdown("# 🧠🎵 EEG Motor Imagery Rehabilitation Composer")
-                #gr.Markdown("**Therapeutic Brain-Computer Interface for Motor Recovery**\n\nCreate beautiful music compositions using your brain signals! This rehabilitation tool helps strengthen motor imagery skills while creating personalized musical pieces.")
+            with gr.TabItem("🎵 Automatic Music Composer"):
+                gr.Markdown("# 🧠 NeuroMusic Studio: An accessible, easy to use motor rehabilitation device.")
                 gr.Markdown("""
-                **How the Task Works**
-
-                This app has **two stages**:
+                **How it works:**
 
-                1. **Music Composition Stage**: Use your motor imagery (imagine moving your left hand, right hand, left leg, or right leg) to add musical layers. Each correct, high-confidence brain signal prediction adds a new sound to your composition. The system will prompt you with a movement to imagine, and you should focus on that movement until the next prompt.
+                1. **Compose:** Imagine moving your left hand, right hand, left leg, or right leg to add musical layers. Each correct, high-confidence prediction adds a sound. Just follow the prompts.
 
-                2. **DJ Effects Stage**: After all four movements are completed, you enter DJ mode. Here, you can apply effects and control playback of your own composition using new commands. The interface and available controls will change to let you experiment with your music.
+                2. **DJ Mode:** After all four layers are added, you can apply effects and remix your composition using new brain commands.
 
-                > **Note:** In DJ mode, each effect is only triggered every 4th time you perform the same movement. This prevents tracks from reloading too frequently.
+                > **Tip:** In DJ mode, each effect is triggered only every 4th time you repeat a movement, to keep playback smooth.
 
-                **Commands and controls will change between stages.** Follow the on-screen instructions for each phase.
+                Commands and controls update as you progress. Just follow the on-screen instructions!
                 """)
@@ -295,13 +388,14 @@ def create_interface():
                     timer_display = gr.Textbox(label="⏱️ Next Trial", interactive=False, value="--")
                     eeg_plot = gr.Plot(label="EEG Data Visualization")
                 with gr.Column(scale=1):
-                    left_hand_sound = gr.Audio(label="👈 Left Hand", interactive=False, autoplay=True, visible=True)
-                    right_hand_sound = gr.Audio(label="👉 Right Hand", interactive=False, autoplay=True, visible=True)
+                    left_hand_sound = gr.Audio(label="🫲 Left Hand", interactive=False, autoplay=True, visible=True)
+                    right_hand_sound = gr.Audio(label="🫱 Right Hand", interactive=False, autoplay=True, visible=True)
                     left_leg_sound = gr.Audio(label="🦵 Left Leg", interactive=False, autoplay=True, visible=True)
                     right_leg_sound = gr.Audio(label="🦵 Right Leg", interactive=False, autoplay=True, visible=True)
                     composition_status = gr.Textbox(label="Composition Status", interactive=False, lines=5)
-
                 def start_and_activate_timer():
+                    ''' Start composing and activate timer for trials.
+                    '''
                     result = start_composition()
                     last_trial_result[:] = result  # Initialize with first trial result
                     if "DJ Mode" not in result[0]:
@@ -313,6 +407,8 @@ def create_interface():
                 timer_counter = {"count": 0}
                 last_trial_result = [None] * 9  # Adjust length to match your outputs
                 def timer_tick():
+                    ''' Timer tick handler for ITI and trials.
+                    '''
                     # 0,1,2: blank, 3: prompt, 4: trial
                     if timer_counter["count"] < 3:
                         timer_counter["count"] += 1
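The tick counter above drives a five-beat trial cadence: three blank inter-trial ticks, one prompt, then one trial. A minimal sketch of that state machine, detached from Gradio (names hypothetical, logic mirroring the counter above):

```python
def tick(counter: dict) -> str:
    """Advance one timer tick: ticks 0-2 are blank ITI, tick 3 prompts, tick 4 runs the trial."""
    if counter["count"] < 3:
        counter["count"] += 1
        return "blank"
    if counter["count"] == 3:
        counter["count"] += 1
        return "prompt"
    counter["count"] = 0  # trial done, restart the cycle
    return "trial"

counter = {"count": 0}
print([tick(counter) for _ in range(10)])
# ['blank', 'blank', 'blank', 'prompt', 'trial', 'blank', 'blank', 'blank', 'prompt', 'trial']
```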
@@ -357,6 +453,8 @@ def create_interface():
                         raise ValueError(f"Unexpected result length in timer_tick: {len(result)}")
 
                 def continue_dj():
+                    ''' Continue DJ phase from button click.
+                    '''
                     result = continue_dj_phase()
                     if len(result) == 8:
                         return (*result, gr.update(active=False), gr.update(visible=True))
@@ -375,10 +473,28 @@ def create_interface():
                         left_hand_sound, right_hand_sound, left_leg_sound, right_leg_sound, composition_status, timer, continue_btn]
                 )
                 def stop_composing():
                     timer_counter["count"] = 0
-                    last_trial_result[:] = ["--"] * 9
                     app_state['composition_active'] = False  # Ensure new cycle on next start
-                    # Clear UI and deactivate timer, hide continue button
[added lines of this hunk not shown in this rendering]
                     return ("--", "Stopped", None, None, None, None, None, "Stopped", gr.update(active=False), gr.update(visible=False))
 
                 stop_btn.click(
@@ -393,8 +509,8 @@ def create_interface():
-            with gr.TabItem("Manual Classifier"):
-                gr.Markdown("# 🧑‍💻 Manual Classifier Test")
[replacement lines of this hunk not shown in this rendering]
                 gr.Markdown("Select a movement and run the classifier manually on a random epoch for that movement. Results will be accumulated below.")
                 movement_dropdown = gr.Dropdown(choices=["left_hand", "right_hand", "left_leg", "right_leg"], label="Select Movement")
                 manual_btn = gr.Button("Run Classifier", variant="primary")
@@ -409,6 +525,8 @@ def create_interface():
                 session_confmat = defaultdict(lambda: defaultdict(int))
 
                 def manual_classify(selected_movement):
[added lines of this hunk not shown in this rendering]
                     import matplotlib.pyplot as plt
                     import numpy as np
                     if app_state['demo_data'] is None or app_state['demo_labels'] is None:
@@ -452,7 +570,7 @@ def create_interface():
                     ax_probs.set_ylim(0, 1)
                     fig_probs.tight_layout()
                     # EEG plot
-                    fig = create_eeg_plot(epoch_data, selected_movement, predicted_name, confidence, False)
+                    fig = create_eeg_plot(epoch_data, selected_movement, predicted_name, confidence, False, app_state.get('ch_names'))
                     # Close all open figures to avoid warnings
                     plt.close(fig_confmat)
                     plt.close(fig_probs)
457
+ '''
458
  result = continue_dj_phase()
459
  if len(result) == 8:
460
  return (*result, gr.update(active=False), gr.update(visible=True))
 
473
  left_hand_sound, right_hand_sound, left_leg_sound, right_leg_sound, composition_status, timer, continue_btn]
474
  )
475
  def stop_composing():
476
+ ''' Stop composing and reset state (works in both building and DJ mode). '''
477
  timer_counter["count"] = 0
 
478
  app_state['composition_active'] = False # Ensure new cycle on next start
479
+ # Reset sound_manager state for new session
480
+ sound_manager.current_phase = "building"
481
+ sound_manager.composition_layers = {}
482
+ sound_manager.movements_completed = set()
483
+ sound_manager.active_effects = {m: False for m in ["left_hand", "right_hand", "left_leg", "right_leg"]}
484
+ # Clear static audio cache in get_movement_sounds
485
+ if hasattr(get_movement_sounds, 'audio_cache'):
486
+ for m in get_movement_sounds.audio_cache:
487
+ get_movement_sounds.audio_cache[m][True] = None
488
+ get_movement_sounds.audio_cache[m][False] = None
489
+ if hasattr(get_movement_sounds, 'last_effect_state'):
490
+ for m in get_movement_sounds.last_effect_state:
491
+ get_movement_sounds.last_effect_state[m] = None
492
+ if hasattr(get_movement_sounds, 'play_counter'):
493
+ for m in get_movement_sounds.play_counter:
494
+ get_movement_sounds.play_counter[m] = 0
495
+ get_movement_sounds.total_calls = 0
496
+ # Clear UI and deactivate timer, hide continue button, clear all audio
497
+ last_trial_result[:] = ["--", "Stopped", None, None, None, None, None, "Stopped"]
498
  return ("--", "Stopped", None, None, None, None, None, "Stopped", gr.update(active=False), gr.update(visible=False))
499
 
500
  stop_btn.click(
 
509
  left_hand_sound, right_hand_sound, left_leg_sound, right_leg_sound, composition_status, timer, continue_btn]
510
  )
511
 
512
+ with gr.TabItem("📝 Manual Classifier"):
513
+ gr.Markdown("# Manual Classifier")
514
  gr.Markdown("Select a movement and run the classifier manually on a random epoch for that movement. Results will be accumulated below.")
515
  movement_dropdown = gr.Dropdown(choices=["left_hand", "right_hand", "left_leg", "right_leg"], label="Select Movement")
516
  manual_btn = gr.Button("Run Classifier", variant="primary")
 
525
  session_confmat = defaultdict(lambda: defaultdict(int))
526
 
527
  def manual_classify(selected_movement):
528
+ ''' Manually classify a random epoch for the selected movement.
529
+ '''
530
  import matplotlib.pyplot as plt
531
  import numpy as np
532
  if app_state['demo_data'] is None or app_state['demo_labels'] is None:
 
570
  ax_probs.set_ylim(0, 1)
571
  fig_probs.tight_layout()
572
  # EEG plot
573
+ fig = create_eeg_plot(epoch_data, selected_movement, predicted_name, confidence, False, app_state.get('ch_names'))
574
  # Close all open figures to avoid warnings
575
  plt.close(fig_confmat)
576
  plt.close(fig_probs)
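
A minimal, self-contained sketch of the sequential gating used in continue_dj_phase above — a prediction only advances the DJ sequence when it matches the next expected movement. Only dj_order/dj_index mirror names from the diff; the rest is illustrative, not the app's code:

from typing import List, Tuple

DJ_ORDER: List[str] = ["right_hand", "left_leg", "right_leg", "left_hand"]  # hypothetical order

def gate_prediction(predicted_name: str, dj_index: int) -> Tuple[bool, int]:
    # Only the movement that is next in DJ_ORDER triggers an effect and
    # advances the index; any other prediction is ignored.
    if dj_index < len(DJ_ORDER) and predicted_name == DJ_ORDER[dj_index]:
        return True, dj_index + 1
    return False, dj_index

assert gate_prediction("left_leg", 0) == (False, 0)   # out of order: ignored
assert gate_prediction("right_hand", 0) == (True, 1)  # expected next: effect fires
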
classifier.py CHANGED
@@ -10,9 +10,8 @@ import torch.nn as nn
 import numpy as np
 from braindecode.models.shallow_fbcsp import ShallowFBCSPNet
 from braindecode.modules.layers import Ensure4d # necessary for loading
- from typing import Dict, Tuple, Optional
+ from typing import Dict, Tuple
 import os
- from sklearn.metrics import accuracy_score
 from data_processor import EEGDataProcessor
 from config import DEMO_DATA_PATHS

@@ -36,7 +35,9 @@ class MotorImageryClassifier:
 self.is_loaded = False

 def load_model(self, n_chans: int, n_times: int, n_outputs: int = 6):
- """Load the pre-trained ShallowFBCSPNet model."""
+ """Load the pre-trained ShallowFBCSPNet model.
+ If the model file is not found or is incompatible, fall back to LOSO training.
+ """
 try:
 self.model = ShallowFBCSPNet(
 n_chans=n_chans,
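
The expanded docstring describes a load-or-fall-back pattern. A rough sketch of that pattern in isolation — load_or_train, model_path, and train_fn are illustrative names, not this repo's API:

import torch

def load_or_train(model, model_path: str, train_fn):
    # Try the saved weights first; a missing file or an architecture mismatch
    # (RuntimeError from load_state_dict) falls back to training, e.g. LOSO.
    try:
        state = torch.load(model_path, map_location="cpu")
        model.load_state_dict(state)
    except (FileNotFoundError, RuntimeError):
        train_fn(model)
    return model
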
config.py CHANGED
@@ -5,59 +5,17 @@ Configuration settings for the EEG Motor Imagery Music Composer
 import os
 from pathlib import Path

- # Application settings
- APP_NAME = "EEG Motor Imagery Music Composer"
- VERSION = "1.0.0"
-
 # Data paths
 BASE_DIR = Path(__file__).parent
 DATA_DIR = BASE_DIR / "data"
 SOUND_DIR = BASE_DIR / "sounds"
 MODEL_DIR = BASE_DIR

- # Model settings
- MODEL_PATH = MODEL_DIR / "model.pth"
- # Model architecture: Always uses ShallowFBCSPNet from braindecode
- # If pre-trained weights not found, will train using LOSO on available data
-
- # EEG Data settings
- SAMPLING_RATE = 200 # Hz
- EPOCH_DURATION = 1.5 # seconds
- N_CHANNELS = 19 # without ground and reference 19 electrodes
- N_CLASSES = 6 # or 4

 # Classification settings
- CONFIDENCE_THRESHOLD = 0.3 # Minimum confidence to add sound layer (lowered for testing)
- MAX_COMPOSITION_LAYERS = 6 # Maximum layers in composition
+ CONFIDENCE_THRESHOLD = 0.7 # Minimum confidence to add a sound layer
+ MAX_COMPOSITION_LAYERS = 4 # Maximum layers in composition
-
- # Sound settings
- SOUND_MAPPING = {
- "left_hand": "1_SoundHelix-Song-6_(Bass).wav",
- "right_hand": "1_SoundHelix-Song-6_(Drums).wav",
- "neutral": None, # No sound for neutral/rest state
- "left_leg": "1_SoundHelix-Song-6_(Other).wav",
- "tongue": "1_SoundHelix-Song-6_(Vocals).wav",
- "right_leg": "1_SoundHelix-Song-6_(Bass).wav" # Can be remapped by user
- }
-
- # Motor imagery class names
- CLASS_NAMES = {
- 0: "left_hand",
- 1: "right_hand",
- 2: "neutral",
- 3: "left_leg",
- 4: "tongue",
- 5: "right_leg"
- }
-
- CLASS_DESCRIPTIONS = {
- "left_hand": "🤚 Left Hand Movement",
- "right_hand": "🤚 Right Hand Movement",
- "neutral": "😐 Neutral/Rest State",
- "left_leg": "🦵 Left Leg Movement",
- "tongue": "👅 Tongue Movement",
- "right_leg": "🦵 Right Leg Movement"
- }

 # Demo data paths (optional) - updated with available files
 DEMO_DATA_PATHS = [
@@ -65,29 +23,3 @@ DEMO_DATA_PATHS = [
 "data/HaLTSubjectA1603086StLRHandLegTongue.mat",
 "data/HaLTSubjectA1603106StLRHandLegTongue.mat",
 ]
-
- # Gradio settings
- GRADIO_PORT = 7860
- GRADIO_HOST = "0.0.0.0"
- GRADIO_SHARE = False # Set to True to create public links
-
- # Logging settings
- LOG_LEVEL = "INFO"
- LOG_FILE = BASE_DIR / "logs" / "app.log"
-
- # Create necessary directories
- def create_directories():
- """Create necessary directories if they don't exist."""
- directories = [
- DATA_DIR,
- SOUND_DIR,
- MODEL_DIR,
- LOG_FILE.parent
- ]
-
- for directory in directories:
- directory.mkdir(parents=True, exist_ok=True)
-
- if __name__ == "__main__":
- create_directories()
- print("Configuration directories created successfully!")
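
Downstream, the two remaining settings gate when a layer joins the composition; a hedged sketch of typical use (should_add_layer is illustrative, not a function in this repo):

from config import CONFIDENCE_THRESHOLD, MAX_COMPOSITION_LAYERS

def should_add_layer(confidence: float, current_layers: int) -> bool:
    # A layer is added only for a confident prediction, and only while
    # the composition is still below the layer cap.
    return confidence > CONFIDENCE_THRESHOLD and current_layers < MAX_COMPOSITION_LAYERS
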
data_processor.py CHANGED
@@ -8,11 +8,8 @@ Adapted from the original eeg_motor_imagery.py script.
 import scipy.io
 import numpy as np
 import mne
- import torch
 import pandas as pd
- from typing import List, Tuple, Dict, Optional
- from pathlib import Path
- from scipy.signal import butter, lfilter
+ from typing import List, Tuple

 class EEGDataProcessor:
 """
@@ -96,7 +93,7 @@
 )
 return epochs

- def process_files(self, file_paths: List[str]) -> Tuple[np.ndarray, np.ndarray]:
+ def process_files(self, file_paths: List[str]) -> Tuple[np.ndarray, np.ndarray, List[str]]:
 """Process multiple EEG files and return combined data."""
 all_epochs = []
 allowed_labels = {1, 2, 4, 6}
@@ -110,18 +107,18 @@
 events = events[np.isin(events[:, -1], list(allowed_labels))]
 # create epochs only for allowed labels
 epochs = self.create_epochs(raw, events, event_id=allowed_event_id)
- all_epochs.append(epochs)
+ all_epochs.append((epochs, channels))

 if len(all_epochs) > 1:
- epochs_combined = mne.concatenate_epochs(all_epochs)
+ epochs_combined = mne.concatenate_epochs([ep for ep, _ in all_epochs])
+ ch_names = all_epochs[0][1] # Assume same channel order for all files
 else:
- epochs_combined = all_epochs[0]
-
+ epochs_combined = all_epochs[0][0]
+ ch_names = all_epochs[0][1]
 # Convert to arrays for model input
 X = epochs_combined.get_data().astype("float32")
 y = (epochs_combined.events[:, -1] - 1).astype("int64") # classes 0..5
-
- return X, y
+ return X, y, ch_names

 def load_continuous_data(self, file_paths: List[str]) -> Tuple[np.ndarray, int]:
 """
@@ -232,31 +229,4 @@

 return X[idx], y[idx]

- # def simulate_continuous_stream(self, raw_data: np.ndarray, fs: int, window_size: float = 1.5) -> np.ndarray:
- # """
- # Simulate continuous EEG stream by extracting sliding windows from raw data.
-
- # Args:
- # raw_data: Continuous EEG data [n_channels, n_timepoints]
- # fs: Sampling frequency
- # window_size: Window size in seconds
-
- # Returns:
- # Single window of EEG data [n_channels, window_samples]
- # """
- # window_samples = int(window_size * fs) # e.g., 1.5s * 200Hz = 300 samples
-
- # # Ensure we don't go beyond the data
- # max_start = raw_data.shape[1] - window_samples
- # if max_start <= 0:
- # return raw_data # Return full data if too short
-
- # # Random starting point in the continuous stream
- # start_idx = np.random.randint(0, max_start)
- # end_idx = start_idx + window_samples
-
- # # Extract window
- # window = raw_data[:, start_idx:end_idx]
-
- # return window
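
process_files now returns channel names as a third value, so two-value call sites would break; a usage sketch under that assumption:

from data_processor import EEGDataProcessor
from config import DEMO_DATA_PATHS

processor = EEGDataProcessor()
X, y, ch_names = processor.process_files(DEMO_DATA_PATHS)  # was: X, y = ...
print(X.shape, y.shape, len(ch_names))  # (epochs, channels, times), labels, channel names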
 
demo.py DELETED
@@ -1,96 +0,0 @@
- #!/usr/bin/env python3
- """
- Demo script for the EEG Motor Imagery Music Composer
- """
-
- import sys
- import os
- from pathlib import Path
-
- # Add the current directory to Python path
- current_dir = Path(__file__).parent
- sys.path.insert(0, str(current_dir))
-
- from data_processor import EEGDataProcessor
- from classifier import MotorImageryClassifier
- from sound_library import SoundManager
- from utils import setup_logging, create_classification_summary
- import numpy as np
-
- def run_demo():
- """Run a simple demo of the system components."""
- print("🧠 EEG Motor Imagery Music Composer - Demo")
- print("=" * 50)
-
- # Initialize components
- print("Initializing components...")
- data_processor = EEGDataProcessor()
- classifier = MotorImageryClassifier()
- sound_manager = SoundManager()
-
- # Create mock data for demo
- print("Creating mock EEG data...")
- mock_data = np.random.randn(10, 32, 384).astype(np.float32) # 10 samples, 32 channels, 384 time points
- mock_labels = np.random.randint(0, 6, 10)
-
- # Initialize classifier
- print("Loading classifier...")
- classifier.load_model(n_chans=32, n_times=384)
-
- print(f"Available sounds: {sound_manager.get_available_sounds()}")
- print()
-
- # Run classification demo
- print("Running classification demo...")
- print("-" * 30)
-
- for i in range(5):
- # Get random sample
- sample_idx = np.random.randint(0, len(mock_data))
- eeg_sample = mock_data[sample_idx]
- true_label = mock_labels[sample_idx]
-
- # Classify
- predicted_class, confidence, probabilities = classifier.predict(eeg_sample)
- predicted_name = classifier.class_names[predicted_class]
- true_name = classifier.class_names[true_label]
-
- # Create summary
- summary = create_classification_summary(predicted_name, confidence, probabilities)
-
- print(f"Sample {i+1}:")
- print(f" True class: {true_name}")
- print(f" Predicted: {summary['emoji']} {predicted_name} ({summary['confidence_percent']})")
-
- # Add to composition if confidence is high
- if confidence > 0.7 and predicted_name != 'neutral':
- sound_manager.add_layer(predicted_name, confidence)
- print(f" ♪ Added to composition: {predicted_name}")
- else:
- print(f" - Not added (low confidence or neutral)")
-
- print()
-
- # Show composition summary
- composition_info = sound_manager.get_composition_info()
- print("Final Composition:")
- print("-" * 20)
- if composition_info:
- for i, layer in enumerate(composition_info, 1):
- print(f"{i}. {layer['class']} (confidence: {layer['confidence']:.2f})")
- else:
- print("No composition layers created")
-
- print()
- print("Demo completed! 🎵")
- print("To run the full Gradio interface, execute: python app.py")
-
- if __name__ == "__main__":
- try:
- run_demo()
- except KeyboardInterrupt:
- print("\nDemo interrupted by user")
- except Exception as e:
- print(f"Error running demo: {e}")
- import traceback
- traceback.print_exc()

enhanced_utils.py DELETED
@@ -1,236 +0,0 @@
- """
- Enhanced utilities incorporating useful functions from the original src/ templates
- """
-
- import torch
- import numpy as np
- import matplotlib.pyplot as plt
- import seaborn as sns
- from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
- from typing import Dict, List, Tuple, Optional
- import logging
-
- def evaluate_model_performance(model, dataloader, device, class_names: List[str]) -> Dict:
- """
- Comprehensive model evaluation with metrics and visualizations.
- Enhanced version of src/evaluate.py
- """
- model.eval()
- all_preds = []
- all_labels = []
- all_probs = []
-
- with torch.no_grad():
- for inputs, labels in dataloader:
- inputs = inputs.to(device)
- labels = labels.to(device)
-
- outputs = model(inputs)
- probs = torch.softmax(outputs, dim=1)
- _, preds = torch.max(outputs, 1)
-
- all_preds.extend(preds.cpu().numpy())
- all_labels.extend(labels.cpu().numpy())
- all_probs.extend(probs.cpu().numpy())
-
- # Calculate metrics
- accuracy = accuracy_score(all_labels, all_preds)
- cm = confusion_matrix(all_labels, all_preds)
- report = classification_report(all_labels, all_preds, target_names=class_names, output_dict=True)
-
- return {
- 'accuracy': accuracy,
- 'predictions': all_preds,
- 'labels': all_labels,
- 'probabilities': all_probs,
- 'confusion_matrix': cm,
- 'classification_report': report
- }
-
- def plot_confusion_matrix(cm: np.ndarray, class_names: List[str], title: str = "Confusion Matrix") -> plt.Figure:
- """
- Plot confusion matrix with proper formatting.
- Enhanced version from src/visualize.py
- """
- fig, ax = plt.subplots(figsize=(8, 6))
-
- sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
- xticklabels=class_names, yticklabels=class_names, ax=ax)
-
- ax.set_title(title)
- ax.set_ylabel('True Label')
- ax.set_xlabel('Predicted Label')
-
- plt.tight_layout()
- return fig
-
- def plot_classification_probabilities(probabilities: np.ndarray, class_names: List[str],
- sample_indices: Optional[List[int]] = None) -> plt.Figure:
- """
- Plot classification probabilities for selected samples.
- """
- if sample_indices is None:
- sample_indices = list(range(min(10, len(probabilities))))
-
- fig, ax = plt.subplots(figsize=(12, 6))
-
- x = np.arange(len(class_names))
- width = 0.8 / len(sample_indices)
-
- for i, sample_idx in enumerate(sample_indices):
- offset = (i - len(sample_indices)/2) * width
- ax.bar(x + offset, probabilities[sample_idx], width,
- label=f'Sample {sample_idx}', alpha=0.8)
-
- ax.set_xlabel('Motor Imagery Classes')
- ax.set_ylabel('Probability')
- ax.set_title('Classification Probabilities')
- ax.set_xticks(x)
- ax.set_xticklabels(class_names, rotation=45)
- ax.legend()
- ax.grid(True, alpha=0.3)
-
- plt.tight_layout()
- return fig
-
- def plot_training_history(history: Dict[str, List[float]]) -> plt.Figure:
- """
- Plot training history (loss and accuracy).
- Enhanced version from src/visualize.py
- """
- fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
-
- # Plot accuracy
- if 'train_accuracy' in history and 'val_accuracy' in history:
- ax1.plot(history['train_accuracy'], label='Train', linewidth=2)
- ax1.plot(history['val_accuracy'], label='Validation', linewidth=2)
- ax1.set_title('Model Accuracy')
- ax1.set_xlabel('Epoch')
- ax1.set_ylabel('Accuracy')
- ax1.legend()
- ax1.grid(True, alpha=0.3)
-
- # Plot loss
- if 'train_loss' in history and 'val_loss' in history:
- ax2.plot(history['train_loss'], label='Train', linewidth=2)
- ax2.plot(history['val_loss'], label='Validation', linewidth=2)
- ax2.set_title('Model Loss')
- ax2.set_xlabel('Epoch')
- ax2.set_ylabel('Loss')
- ax2.legend()
- ax2.grid(True, alpha=0.3)
-
- plt.tight_layout()
- return fig
-
- def plot_eeg_channels(eeg_data: np.ndarray, channel_names: Optional[List[str]] = None,
- sample_rate: int = 256, title: str = "EEG Channels") -> plt.Figure:
- """
- Plot multiple EEG channels.
- Enhanced visualization for EEG data.
- """
- n_channels, n_samples = eeg_data.shape
- time_axis = np.arange(n_samples) / sample_rate
-
- # Determine subplot layout
- n_rows = int(np.ceil(np.sqrt(n_channels)))
- n_cols = int(np.ceil(n_channels / n_rows))
-
- fig, axes = plt.subplots(n_rows, n_cols, figsize=(15, 10))
- if n_channels == 1:
- axes = [axes]
- else:
- axes = axes.flatten()
-
- for i in range(n_channels):
- ax = axes[i]
- ax.plot(time_axis, eeg_data[i], 'b-', linewidth=1)
-
- channel_name = channel_names[i] if channel_names else f'Channel {i+1}'
- ax.set_title(channel_name)
- ax.set_xlabel('Time (s)')
- ax.set_ylabel('Amplitude')
- ax.grid(True, alpha=0.3)
-
- # Hide unused subplots
- for i in range(n_channels, len(axes)):
- axes[i].set_visible(False)
-
- plt.suptitle(title)
- plt.tight_layout()
- return fig
-
- class EarlyStopping:
- """
- Early stopping utility from src/types/index.py
- """
- def __init__(self, patience=7, min_delta=0, restore_best_weights=True, verbose=False):
- self.patience = patience
- self.min_delta = min_delta
- self.restore_best_weights = restore_best_weights
- self.verbose = verbose
- self.best_loss = None
- self.counter = 0
- self.best_weights = None
-
- def __call__(self, val_loss, model):
- if self.best_loss is None:
- self.best_loss = val_loss
- self.save_checkpoint(model)
- elif val_loss < self.best_loss - self.min_delta:
- self.best_loss = val_loss
- self.counter = 0
- self.save_checkpoint(model)
- else:
- self.counter += 1
-
- if self.counter >= self.patience:
- if self.verbose:
- print(f'Early stopping triggered after {self.counter} epochs of no improvement')
- if self.restore_best_weights:
- model.load_state_dict(self.best_weights)
- return True
- return False
-
- def save_checkpoint(self, model):
- """Save model when validation loss decreases."""
- if self.restore_best_weights:
- self.best_weights = model.state_dict().copy()
-
- def create_enhanced_evaluation_report(model, test_loader, class_names: List[str],
- device, save_plots: bool = True) -> Dict:
- """
- Create a comprehensive evaluation report with plots and metrics.
- """
- # Get evaluation results
- results = evaluate_model_performance(model, test_loader, device, class_names)
-
- # Create visualizations
- plots = {}
-
- # Confusion Matrix
- plots['confusion_matrix'] = plot_confusion_matrix(
- results['confusion_matrix'], class_names,
- title="Motor Imagery Classification - Confusion Matrix"
- )
-
- # Classification Probabilities (sample)
- plots['probabilities'] = plot_classification_probabilities(
- np.array(results['probabilities']), class_names,
- sample_indices=list(range(min(5, len(results['probabilities']))))
- )
-
- if save_plots:
- for plot_name, fig in plots.items():
- fig.savefig(f'{plot_name}.png', dpi=300, bbox_inches='tight')
-
- return {
- 'metrics': results,
- 'plots': plots,
- 'summary': {
- 'accuracy': results['accuracy'],
- 'n_samples': len(results['labels']),
- 'n_classes': len(class_names),
- 'class_names': class_names
- }
- }

requirements.txt CHANGED
@@ -1,14 +1,11 @@
- gradio
- pandas
- numpy
- scipy
- matplotlib
- torch
- torchvision
- mne
- braindecode
- scikit-learn
- pydub
- soundfile
- librosa
- threading
+ gradio==5.47.0
+ numpy==2.3.3
+ pandas==2.3.2
+ matplotlib==3.10.6
+ mne==1.10.1
+ soundfile==0.13.1
+ scipy==1.16.2
+ torch==2.8.0
+ torchaudio==2.8.0
+ braindecode==1.2.0
+ scikit-learn==1.7.2

sound_manager.py CHANGED
@@ -7,12 +7,21 @@ Supports seamless transition from building (layering) to DJ (effects) phase.

 import numpy as np
 import soundfile as sf
- import os
- from typing import Dict, Optional, List
+ from typing import Dict
 from pathlib import Path

 class AudioEffectsProcessor:
 @staticmethod
+ def apply_fade_in_out(data: np.ndarray, samplerate: int, fade_duration: float = 0.5) -> np.ndarray:
+ fade_samples = int(fade_duration * samplerate)
+ data = np.copy(data)
+ if fade_samples > 0 and fade_samples * 2 < len(data):
+ fade_in_curve = np.linspace(0, 1, fade_samples)
+ fade_out_curve = np.linspace(1, 0, fade_samples)
+ data[:fade_samples] = data[:fade_samples] * fade_in_curve
+ data[-fade_samples:] = data[-fade_samples:] * fade_out_curve
+ return data
+ @staticmethod
 def apply_high_pass_filter(data: np.ndarray, samplerate: int, cutoff: float = 800.0) -> np.ndarray:
 from scipy import signal
 nyquist = samplerate / 2
@@ -60,23 +69,26 @@ class AudioEffectsProcessor:
 def process_layer_with_effects(audio_data: np.ndarray, samplerate: int, movement: str, active_effects: Dict[str, bool]) -> np.ndarray:
 processed_data = np.copy(audio_data)
 effect_map = {
- "left_hand": AudioEffectsProcessor.apply_echo, # Echo
+ "left_hand": AudioEffectsProcessor.apply_fade_in_out, # Fade in/out
 "right_hand": AudioEffectsProcessor.apply_low_pass_filter, # Low Pass
- "left_leg": AudioEffectsProcessor.apply_compressor, # Compressor
- "right_leg": AudioEffectsProcessor.apply_high_pass_filter, # High Pass
+ "left_leg": AudioEffectsProcessor.apply_compressor, # Compressor
+ "right_leg": AudioEffectsProcessor.apply_echo, # Echo (vocals)
 }
 effect_func = effect_map.get(movement)
 if active_effects.get(movement, False) and effect_func:
- processed_data = effect_func(processed_data, samplerate)
+ if movement == "left_hand":
+ processed_data = effect_func(processed_data, samplerate, fade_duration=0.5)
+ else:
+ processed_data = effect_func(processed_data, samplerate)
 return processed_data

 class SoundManager:
 def __init__(self, sound_dir: str = "sounds"):
 self.available_sounds = [
- "SoundHelix-Song-4_bass.wav",
- "SoundHelix-Song-4_drums.wav",
- "SoundHelix-Song-4_instruments.wav",
- "SoundHelix-Song-4_vocals.wav"
+ "SoundHelix-Song-6_bass.wav",
+ "SoundHelix-Song-6_drums.wav",
+ "SoundHelix-Song-6_instruments.wav",
+ "SoundHelix-Song-6_vocals.wav"
 ]
 self.sound_dir = Path(sound_dir)
 self.current_cycle = 0
@@ -125,12 +137,17 @@ class SoundManager:
 self._load_sound_files()

 def get_current_target_movement(self) -> str:
- # Randomly select a movement from those not yet completed
- import random
+ # Always process left_hand last in DJ mode
 incomplete = [m for m in self.active_movements if m not in self.movements_completed]
 if not incomplete:
 print("DEBUG: All movements completed, cycle complete.")
 return "cycle_complete"
+ # If in DJ mode, left_hand should be last
+ if getattr(self, 'current_phase', None) == 'dj_effects':
+ # Remove left_hand from incomplete unless it's the only one left
+ if 'left_hand' in incomplete and len(incomplete) > 1:
+ incomplete = [m for m in incomplete if m != 'left_hand']
+ import random
 movement = random.choice(incomplete)
 print(f"DEBUG: Next target is {movement}, completed: {self.movements_completed}")
 return movement
@@ -198,15 +215,16 @@ class SoundManager:
 return {"effect_applied": False, "message": "Not in DJ effects phase"}
 if movement not in self.active_effects:
 return {"effect_applied": False, "message": f"Unknown movement: {movement}"}
- # Only toggle effect every 4th time this movement is detected
+ # Only toggle the effect at counts 1, 5, 9, ... (the first detection, then every 4th one after)
 self.dj_effect_counters[movement] += 1
- if self.dj_effect_counters[movement] % 4 != 0:
- print(f"🎛️ {movement}: Skipped effect toggle (count={self.dj_effect_counters[movement]})")
- return {"effect_applied": False, "message": f"Effect for {movement} only toggled every 4th time (count={self.dj_effect_counters[movement]})"}
+ count = self.dj_effect_counters[movement]
+ if count != 1 and (count - 1) % 4 != 0:
+ print(f"🎛️ {movement}: Skipped effect toggle (count={count})")
+ return {"effect_applied": False, "message": f"Effect for {movement} only toggled at counts 1, 5, 9, ... (count={count})"}
 # Toggle effect ON
 self.active_effects[movement] = True
 effect_status = "ON"
- print(f"🎛️ {movement}: {effect_status} (brief={brief}) [count={self.dj_effect_counters[movement]}]")
+ print(f"🎛️ {movement}: {effect_status} (brief={brief}) [count={count}]")
 # Schedule effect OFF after duration if brief
 def turn_off_effect():
 self.active_effects[movement] = False
@@ -215,7 +233,7 @@ class SoundManager:
 timer = threading.Timer(duration, turn_off_effect)
 timer.daemon = True
 timer.start()
- return {"effect_applied": True, "effect_name": movement, "effect_status": effect_status, "count": self.dj_effect_counters[movement]}
+ return {"effect_applied": True, "effect_name": movement, "effect_status": effect_status, "count": count}

 def get_composition_info(self) -> Dict:
 layers_by_cycle = {0: []}
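
Note on the gating arithmetic above: since (count - 1) % 4 == 0 is already true for count == 1, the effect fires at counts 1, 5, 9, ... — the first detection, then every 4th one after. A tiny sketch of the cadence:

def effect_fires(count: int) -> bool:
    # Mirrors the condition in toggle_dj_effect: fire when (count - 1) is a multiple of 4.
    return (count - 1) % 4 == 0

print([c for c in range(1, 13) if effect_fires(c)])  # [1, 5, 9]
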
utils.py DELETED
@@ -1,226 +0,0 @@
- """
- Utility functions for the EEG Motor Imagery Music Composer
- """
-
- import numpy as np
- import logging
- import time
- from pathlib import Path
- from typing import Dict, List, Optional, Tuple
- import json
-
- from config import LOG_LEVEL, LOG_FILE, CLASS_NAMES, CLASS_DESCRIPTIONS
-
- def setup_logging():
- """Set up logging configuration."""
- LOG_FILE.parent.mkdir(parents=True, exist_ok=True)
-
- logging.basicConfig(
- level=getattr(logging, LOG_LEVEL),
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
- handlers=[
- logging.FileHandler(LOG_FILE),
- logging.StreamHandler()
- ]
- )
-
- return logging.getLogger(__name__)
-
- def validate_eeg_data(data: np.ndarray) -> bool:
- """
- Validate EEG data format and dimensions.
-
- Args:
- data: EEG data array
-
- Returns:
- bool: True if data is valid, False otherwise
- """
- if not isinstance(data, np.ndarray):
- return False
-
- if data.ndim not in [2, 3]:
- return False
-
- if data.ndim == 2 and data.shape[0] == 0:
- return False
-
- if data.ndim == 3 and (data.shape[0] == 0 or data.shape[1] == 0):
- return False
-
- return True
-
- def format_confidence(confidence: float) -> str:
- """Format confidence score as percentage string."""
- return f"{confidence * 100:.1f}%"
-
- def format_timestamp(timestamp: float) -> str:
- """Format timestamp for display."""
- return time.strftime("%H:%M:%S", time.localtime(timestamp))
-
- def get_class_emoji(class_name: str) -> str:
- """Get emoji representation for motor imagery class."""
- emoji_map = {
- "left_hand": "🤚",
- "right_hand": "🤚",
- "neutral": "😐",
- "left_leg": "🦵",
- "tongue": "👅",
- "right_leg": "🦵"
- }
- return emoji_map.get(class_name, "❓")
-
- def create_classification_summary(
- predicted_class: str,
- confidence: float,
- probabilities: Dict[str, float],
- timestamp: Optional[float] = None
- ) -> Dict:
- """
- Create a formatted summary of classification results.
-
- Args:
- predicted_class: Predicted motor imagery class
- confidence: Confidence score (0-1)
- probabilities: Dictionary of class probabilities
- timestamp: Optional timestamp
-
- Returns:
- Dict: Formatted classification summary
- """
- if timestamp is None:
- timestamp = time.time()
-
- return {
- "predicted_class": predicted_class,
- "confidence": confidence,
- "confidence_percent": format_confidence(confidence),
- "probabilities": probabilities,
- "timestamp": timestamp,
- "formatted_time": format_timestamp(timestamp),
- "emoji": get_class_emoji(predicted_class),
- "description": CLASS_DESCRIPTIONS.get(predicted_class, predicted_class)
- }
-
- def save_session_data(session_data: Dict, filepath: str) -> bool:
- """
- Save session data to JSON file.
-
- Args:
- session_data: Dictionary containing session information
- filepath: Path to save the file
-
- Returns:
- bool: True if successful, False otherwise
- """
- try:
- with open(filepath, 'w') as f:
- json.dump(session_data, f, indent=2, default=str)
- return True
- except Exception as e:
- logging.error(f"Error saving session data: {e}")
- return False
-
- def load_session_data(filepath: str) -> Optional[Dict]:
- """
- Load session data from JSON file.
-
- Args:
- filepath: Path to the JSON file
-
- Returns:
- Dict or None: Session data if successful, None otherwise
- """
- try:
- with open(filepath, 'r') as f:
- return json.load(f)
- except Exception as e:
- logging.error(f"Error loading session data: {e}")
- return None
-
- def calculate_classification_statistics(history: List[Dict]) -> Dict:
- """
- Calculate statistics from classification history.
-
- Args:
- history: List of classification results
-
- Returns:
- Dict: Statistics summary
- """
- if not history:
- return {"total": 0, "class_counts": {}, "average_confidence": 0.0}
-
- class_counts = {}
- total_confidence = 0.0
-
- for item in history:
- class_name = item.get("predicted_class", "unknown")
- confidence = item.get("confidence", 0.0)
-
- class_counts[class_name] = class_counts.get(class_name, 0) + 1
- total_confidence += confidence
-
- return {
- "total": len(history),
- "class_counts": class_counts,
- "average_confidence": total_confidence / len(history),
- "most_common_class": max(class_counts, key=class_counts.get) if class_counts else None
- }
-
- def create_progress_bar(value: float, max_value: float = 1.0, width: int = 20) -> str:
- """
- Create a text-based progress bar.
-
- Args:
- value: Current value
- max_value: Maximum value
- width: Width of progress bar in characters
-
- Returns:
- str: Progress bar string
- """
- percentage = min(value / max_value, 1.0)
- filled = int(width * percentage)
- bar = "█" * filled + "░" * (width - filled)
- return f"[{bar}] {percentage * 100:.1f}%"
-
- def validate_audio_file(file_path: str) -> bool:
- """
- Validate if an audio file exists and is readable.
-
- Args:
- file_path: Path to audio file
-
- Returns:
- bool: True if file is valid, False otherwise
- """
- path = Path(file_path)
- if not path.exists():
- return False
-
- if not path.is_file():
- return False
-
- # Check file extension
- valid_extensions = ['.wav', '.mp3', '.flac', '.ogg']
- if path.suffix.lower() not in valid_extensions:
- return False
-
- return True
-
- def generate_composition_filename(prefix: str = "composition") -> str:
- """
- Generate a unique filename for composition exports.
-
- Args:
- prefix: Filename prefix
-
- Returns:
- str: Unique filename with timestamp
- """
- timestamp = time.strftime("%Y%m%d_%H%M%S")
- return f"{prefix}_{timestamp}.wav"
-
- # Initialize logger when module is imported
- logger = setup_logging()