Shreyas committed
Commit c20196f · verified · 1 Parent(s): 937edd0

Upload 9 files

Files changed (9)
  1. Procfile +1 -0
  2. app.py +59 -0
  3. best.pt +3 -0
  4. face_det.py +39 -0
  5. face_model.py +103 -0
  6. model.py +298 -0
  7. requirements.txt +27 -0
  8. train_bert.py +73 -0
  9. voice_det.py +22 -0
Procfile ADDED
@@ -0,0 +1 @@
+ web: gunicorn app:app
app.py ADDED
@@ -0,0 +1,59 @@
+ from flask import Flask, render_template
+ import subprocess
+ import os
+ import signal
+ import atexit
+ import platform
+ from pathlib import Path
+
+ app = Flask(__name__, template_folder="templates")
+ streamlit_process = None
+
+ @app.route("/")
+ def home():
+     return render_template("index.html")
+
+ @app.route("/model")
+ def model():
+     # simple client-side redirect to Streamlit
+     return """
+     <script>window.location.href = 'http://localhost:8501';</script>
+     <p>Redirecting to AutVid AI...</p>
+     """
+
+ def start_streamlit():
+     """Launch Streamlit as a subprocess (works on Windows/macOS/Linux)."""
+     global streamlit_process
+     if streamlit_process is not None:
+         return
+     # ensure model.py exists
+     if not Path("model.py").exists():
+         raise FileNotFoundError("model.py not found next to app.py")
+     cmd = ["streamlit", "run", "model.py", "--server.port", "8501", "--server.headless", "true"]
+     if platform.system() == "Windows":
+         streamlit_process = subprocess.Popen(cmd, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
+     else:
+         # setsid creates a new process group so we can kill it gracefully
+         streamlit_process = subprocess.Popen(cmd, preexec_fn=os.setsid)
+
+ def stop_streamlit():
+     """Kill the Streamlit process when Flask exits."""
+     global streamlit_process
+     if streamlit_process:
+         try:
+             if platform.system() == "Windows":
+                 streamlit_process.send_signal(signal.CTRL_BREAK_EVENT)
+             else:
+                 os.killpg(os.getpgid(streamlit_process.pid), signal.SIGTERM)
+         except Exception:
+             try:
+                 streamlit_process.terminate()
+             except Exception:
+                 pass
+         streamlit_process = None
+
+ atexit.register(stop_streamlit)
+
+ if __name__ == "__main__":
+     start_streamlit()
+     app.run(debug=True, port=5000)
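
A quick way to sanity-check the Flask/Streamlit pairing above is a small client-side probe. This is an illustrative sketch only, not part of the commit; it assumes `python app.py` is running locally on port 5000 (the default in its `__main__` block) and that a `templates/index.html` exists for the home route, which this upload does not include.

# Hypothetical smoke test -- not part of this upload.
# Assumes app.py is running locally and templates/index.html is present.
from urllib.request import urlopen

home_html = urlopen("http://localhost:5000/").read().decode("utf-8")
model_html = urlopen("http://localhost:5000/model").read().decode("utf-8")

print("home page length:", len(home_html))
# The /model route should return the tiny redirect page pointing at Streamlit.
print("redirect to Streamlit present:", "localhost:8501" in model_html)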
best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df93006e95a96763ab7e5833b31ff72f335ab01e40a6edd7a1d13f6adef18da0
+ size 5474899
face_det.py ADDED
@@ -0,0 +1,39 @@
+ import cv2
+ from ultralytics import YOLO
+ import supervision as sv
+ import numpy as np
+ import os
+ from typing import Union, Tuple
+
+ class FacialEmotionDetector:
+     """
+     A class to detect facial emotions from an image or video frame using a YOLO model.
+     """
+     def __init__(self, model_path='best.pt'):
+         if not os.path.exists(model_path):
+             raise FileNotFoundError(
+                 f"Model file not found at '{model_path}'. "
+                 f"Please ensure the YOLO model is in the correct directory."
+             )
+         self.model = YOLO(model_path)
+         self.box_annotator = sv.BoxAnnotator(thickness=2)
+         self.label_annotator = sv.LabelAnnotator(text_scale=0.5, text_thickness=1)
+         print("FacialEmotionDetector initialized successfully.")
+
+     def detect_emotion(self, frame: np.ndarray) -> Tuple[np.ndarray, Union[str, None]]:
+         result = self.model(frame, agnostic_nms=True)[0]
+         detections = sv.Detections.from_ultralytics(result)
+
+         dominant_emotion = None
+         if len(detections) > 0:
+             most_confident_idx = np.argmax(detections.confidence)
+             dominant_emotion = detections.data['class_name'][most_confident_idx]
+
+         labels = [
+             f"{self.model.model.names[class_id]} {confidence:0.2f}"
+             for _, _, confidence, class_id, _, _ in detections
+         ]
+
+         annotated_frame = self.box_annotator.annotate(scene=frame.copy(), detections=detections)
+         annotated_frame = self.label_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels)
+         return annotated_frame, dominant_emotion
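
For reference, a minimal way to exercise this detector on a single image might look like the sketch below. It is illustrative only: "photo.jpg" is a placeholder path, and best.pt must sit in the working directory.

# Illustrative usage sketch -- not part of the commit.
import cv2
from face_det import FacialEmotionDetector

detector = FacialEmotionDetector(model_path="best.pt")
image = cv2.imread("photo.jpg")          # BGR image, as detect_emotion expects
annotated, emotion = detector.detect_emotion(image)
print("Dominant emotion:", emotion)
cv2.imwrite("photo_annotated.jpg", annotated)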
face_model.py ADDED
@@ -0,0 +1,103 @@
+ import cv2
+ from ultralytics import YOLO
+ import supervision as sv
+ import numpy as np
+ import os
+ from typing import Union, Tuple
+
+
+ class FacialEmotionDetector:
+     """
+     Detect facial emotions from an image or video frame using a YOLO model.
+     """
+
+     def __init__(self, model_path: str = "best.pt"):
+         """
+         Initialize the detector.
+
+         Args:
+             model_path (str): Path to YOLO model weights (.pt).
+         """
+         if not os.path.exists(model_path):
+             raise FileNotFoundError(
+                 f"❌ Model file not found at '{model_path}'. "
+                 f"Please ensure 'best.pt' is available in the project directory."
+             )
+
+         # Load YOLO model
+         self.model = YOLO(model_path)
+
+         # Supervision annotators for boxes + labels
+         self.box_annotator = sv.BoxAnnotator(thickness=2)
+         self.label_annotator = sv.LabelAnnotator(text_scale=0.5, text_thickness=1)
+
+         print("✅ FacialEmotionDetector initialized successfully.")
+
+     def detect_emotion(self, frame: np.ndarray) -> Tuple[np.ndarray, Union[str, None]]:
+         """
+         Detect emotions in a single frame.
+
+         Args:
+             frame (np.ndarray): BGR image (OpenCV).
+
+         Returns:
+             Tuple[np.ndarray, str|None]:
+                 - Annotated frame (with boxes + labels).
+                 - Most confident emotion label (or None if no detection).
+         """
+         # YOLO inference
+         result = self.model(frame, agnostic_nms=True)[0]
+
+         # Convert YOLO results → Supervision detections
+         detections = sv.Detections.from_ultralytics(result)
+
+         # Find dominant (highest-confidence) detection
+         dominant_emotion = None
+         if len(detections) > 0:
+             most_confident_idx = np.argmax(detections.confidence)
+             dominant_emotion = detections.data["class_name"][most_confident_idx]
+
+         # Build label strings
+         labels = [
+             f"{self.model.model.names[class_id]} {confidence:.2f}"
+             for _, _, confidence, class_id, _, _ in detections
+         ]
+
+         # Annotate boxes
+         annotated = self.box_annotator.annotate(scene=frame.copy(), detections=detections)
+         # Annotate labels
+         annotated = self.label_annotator.annotate(scene=annotated, detections=detections, labels=labels)
+
+         return annotated, dominant_emotion
+
+
+ if __name__ == "__main__":
+     # Quick webcam test
+     try:
+         detector = FacialEmotionDetector(model_path="best.pt")
+         cap = cv2.VideoCapture(0)
+
+         if not cap.isOpened():
+             print("❌ Could not open webcam.")
+         else:
+             while True:
+                 ret, frame = cap.read()
+                 if not ret:
+                     break
+
+                 annotated_frame, emotion = detector.detect_emotion(frame)
+                 cv2.imshow("Facial Emotion Detection", annotated_frame)
+
+                 if emotion:
+                     print(f"Detected Emotion: {emotion}")
+
+                 if cv2.waitKey(1) & 0xFF == ord("q"):
+                     break
+
+             cap.release()
+             cv2.destroyAllWindows()
+
+     except FileNotFoundError as e:
+         print(e)
+     except Exception as e:
+         print(f"⚠️ Unexpected error: {e}")
model.py ADDED
@@ -0,0 +1,298 @@
+ # model.py -- Realtime Video + Audio + Subtitles + Emotion Fusion
+ import os
+ import time
+ import threading
+ import wave
+ from pathlib import Path
+ from typing import List, Tuple, Dict
+
+ import av
+ import cv2
+ import numpy as np
+ import streamlit as st
+ import torch
+
+ from transformers import BertTokenizer, BertForSequenceClassification
+
+ # Custom modules (ensure they exist)
+ from face_model import FacialEmotionDetector
+ from voice_det import Voice_Analysis
+
+ from streamlit_webrtc import webrtc_streamer, WebRtcMode, VideoTransformerBase, AudioProcessorBase
+
+ # ------------------------- Config -------------------------
+ FRAME_DETECT_EVERY_N = 4  # run YOLO every Nth frame (adjust for CPU)
+ AUDIO_SAMPLE_RATE = 48000
+ TEMP_AUDIO_PATH = "temp_recordings/live.wav"
+ BEST_PT = Path(__file__).parent / "best.pt"
+
+ st.set_page_config(page_title="AutVid AI — Realtime", layout="wide")
+ st.title("🧠 AutVid AI — Real-time Video + Audio Emotion")
+
+ # ------------------------- Cached model loaders -------------------------
+ @st.cache_resource
+ def load_face_model_main():
+     if not BEST_PT.exists():
+         st.warning(f"YOLO weights not found at {BEST_PT.resolve()}. Video detection will show placeholder.")
+         return None
+     try:
+         det = FacialEmotionDetector(model_path=str(BEST_PT))
+         st.info("FacialEmotionDetector loaded.")
+         return det
+     except Exception as e:
+         st.error(f"Failed to load FacialEmotionDetector: {e}")
+         return None
+
+ @st.cache_resource
+ def load_voice_model():
+     try:
+         vm = Voice_Analysis()
+         st.info("Voice_Analysis loaded.")
+         return vm
+     except Exception as e:
+         st.error(f"Failed to load Voice_Analysis: {e}")
+         return None
+
+ @st.cache_resource
+ def load_text_model():
+     try:
+         model_name = "bhadresh-savani/bert-base-go-emotion"
+         tok = BertTokenizer.from_pretrained(model_name)
+         mdl = BertForSequenceClassification.from_pretrained(model_name)
+         mdl.eval()
+         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         mdl.to(device)
+         id2label = mdl.config.id2label if hasattr(mdl.config, "id2label") else {i: str(i) for i in range(mdl.config.num_labels)}
+         label_list = [id2label[i] for i in range(len(id2label))]
+         st.info("Text model loaded.")
+         return tok, mdl, device, label_list
+     except Exception as e:
+         st.error(f"Failed to load text model: {e}")
+         return None, None, None, []
+
+ face_model_main = load_face_model_main()
+ voice_model = load_voice_model()
+ tokenizer, text_model, device, label_list = load_text_model()
+
+ # ------------------------- Text analysis -------------------------
+ def analyze_text_multilabel(text: str, threshold: float = 0.3) -> Tuple[List[str], Dict[str, float]]:
+     if not text.strip() or text_model is None:
+         return [], {}
+     enc = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=256).to(device)
+     with torch.no_grad():
+         logits = text_model(**enc).logits
+     probs = torch.sigmoid(logits)[0].cpu().numpy()
+     scores = {label_list[i]: float(probs[i]) for i in range(len(label_list))}
+     chosen = [lbl for lbl, p in scores.items() if p >= threshold]
+     if not chosen:
+         chosen = [max(scores, key=scores.get)]
+     return chosen, scores
+
+ # ------------------------- WebRTC processors -------------------------
+ class AudioRecorder(AudioProcessorBase):
+     def __init__(self):
+         self.frames = []
+         self.lock = threading.Lock()
+         self.sample_rate = AUDIO_SAMPLE_RATE
+
+     # streamlit-webrtc calls recv() for each incoming audio frame
+     def recv(self, frame: av.AudioFrame) -> av.AudioFrame:
+         arr = frame.to_ndarray()
+         mono = np.mean(arr, axis=0).astype(np.int16) if arr.ndim == 2 else arr.astype(np.int16)
+         with self.lock:
+             self.frames.append(mono)
+         return frame
+
+     def save_wav(self, filename: str = TEMP_AUDIO_PATH) -> str:
+         os.makedirs(os.path.dirname(filename), exist_ok=True)
+         with self.lock:
+             if not self.frames:
+                 raise ValueError("No audio captured")
+             audio = np.concatenate(self.frames, axis=0).astype(np.int16)
+         with wave.open(filename, "wb") as wf:
+             wf.setnchannels(1)
+             wf.setsampwidth(2)
+             wf.setframerate(self.sample_rate)
+             wf.writeframes(audio.tobytes())
+         return filename
+
+     def clear(self):
+         with self.lock:
+             self.frames = []
+
+ class VideoProcessor(VideoTransformerBase):
+     def __init__(self):
+         try:
+             self.detector = FacialEmotionDetector(model_path=str(BEST_PT)) if BEST_PT.exists() else None
+         except Exception:
+             self.detector = None
+         self.lock = threading.Lock()
+         self.counter = 0
+         self.last_annotated = None
+         self.last_emotion = None
+
+     def transform(self, frame: av.VideoFrame) -> av.VideoFrame:
+         img = frame.to_ndarray(format="bgr24")
+         annotated = img.copy()
+         emo = None
+         self.counter += 1
+         try:
+             if self.counter % FRAME_DETECT_EVERY_N == 0 and self.detector:
+                 ann, emo = self.detector.detect_emotion(img)
+                 if ann is not None:
+                     annotated = ann
+         except Exception as e:
+             print("Frame detection error:", e)
+
+         # Overlay transcript
+         transcript = st.session_state.get("transcript_overlay", "")
+         y0 = 30
+         for i, line in enumerate(transcript.split("\n")[-3:]):
+             y = y0 + i * 25
+             cv2.putText(annotated, line, (10, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
+
+         # Overlay last emotion
+         if emo:
+             cv2.putText(annotated, f"Emotion: {emo}", (10, y0 + 100), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
+
+         with self.lock:
+             self.last_annotated = annotated.copy()
+             self.last_emotion = emo
+
+         return av.VideoFrame.from_ndarray(annotated, format="bgr24")
+
+     def get_last(self):
+         with self.lock:
+             return self.last_annotated, self.last_emotion
+
+ # ------------------------- Session state -------------------------
+ for k, v in {
+     "video_emotion": None,
+     "voice_emotion": None,
+     "transcript": "",
+     "transcript_overlay": "",
+     "text_emotions": []
+ }.items():
+     if k not in st.session_state:
+         st.session_state[k] = v
+
+ # ------------------------- UI / Streamer -------------------------
+ st.sidebar.markdown("## Controls")
+ FRAME_DETECT_EVERY_N = st.sidebar.slider("Run YOLO every N frames", 1, 12, FRAME_DETECT_EVERY_N, 1)
+ auto_analyze = st.sidebar.checkbox("Auto analyze audio every interval", value=False)
+ auto_interval = st.sidebar.slider("Auto analyze interval (s)", 5, 30, 12, 1)
+
+ col_main, col_side = st.columns([2, 1])
+
+ with col_main:
+     st.subheader("Live camera (annotated)")
+     ctx = webrtc_streamer(
+         key="live-av",
+         mode=WebRtcMode.SENDRECV,
+         video_transformer_factory=VideoProcessor,
+         audio_processor_factory=AudioRecorder,
+         media_stream_constraints={"video": True, "audio": True},
+         async_processing=True,
+     )
+
+     st.markdown("---")
+     st.write("Live preview from worker:")
+     if ctx and ctx.video_transformer:
+         annotated_frame, last_emo = ctx.video_transformer.get_last()
+         if annotated_frame is not None:
+             st.image(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB), caption=f"Emotion: {last_emo}")
+
+ with col_side:
+     st.subheader("Live outputs")
+     st.metric("Video emotion", st.session_state.get("video_emotion") or "N/A")
+     st.metric("Voice emotion", st.session_state.get("voice_emotion") or "N/A")
+     st.text_area("Transcript", value=st.session_state.get("transcript", ""), height=160)
+
+     # ctx.audio_processor is the AudioRecorder instance created by audio_processor_factory
+     if st.button("Clear audio buffer") and ctx and ctx.audio_processor:
+         try:
+             ctx.audio_processor.clear()
+             st.success("Cleared audio buffer.")
+         except Exception as e:
+             st.error(f"Clear failed: {e}")
+
+     if st.button("Save & Analyze now") and ctx and ctx.audio_processor:
+         proc = ctx.audio_processor
+         try:
+             wav = proc.save_wav(TEMP_AUDIO_PATH)
+             proc.clear()
+             st.audio(wav)
+             if voice_model:
+                 res = voice_model.detect(wav)
+                 if res:
+                     st.session_state.voice_emotion = max(res, key=lambda r: r["score"])["label"]
+                 st.session_state.transcript = voice_model.subtitles(wav)
+                 st.session_state.transcript_overlay = st.session_state.transcript
+             st.success("Saved and analyzed audio.")
+         except Exception as e:
+             st.error(f"Save/analyze failed: {e}")
+
+ # Update video emotion from worker
+ if ctx and ctx.video_transformer:
+     _, last_vid_emo = ctx.video_transformer.get_last()
+     if last_vid_emo:
+         st.session_state.video_emotion = last_vid_emo
+
+ # Auto audio analyze loop
+ def auto_audio_loop():
+     while True:
+         if auto_analyze and ctx and ctx.audio_processor:
+             try:
+                 proc = ctx.audio_processor
+                 wav = proc.save_wav(TEMP_AUDIO_PATH.replace(".wav", "_auto.wav"))
+                 proc.clear()
+                 if voice_model:
+                     res = voice_model.detect(wav)
+                     if res:
+                         st.session_state.voice_emotion = max(res, key=lambda r: r["score"])["label"]
+                     txt = voice_model.subtitles(wav)
+                     st.session_state.transcript = txt
+                     st.session_state.transcript_overlay = txt
+             except Exception:
+                 pass
+         time.sleep(auto_interval)
+
+ threading.Thread(target=auto_audio_loop, daemon=True).start()
+
+ # ---- Text analysis UI ----
+ st.markdown("---")
+ st.subheader("Text Emotion (BERT multi-label)")
+ text_in = st.text_area("Enter text to analyze", value=st.session_state.get("transcript", ""), height=140)
+ thresh = st.slider("Confidence threshold", 0.1, 0.9, 0.3, 0.05)
+ if st.button("Analyze text"):
+     chosen, scores = analyze_text_multilabel(text_in, threshold=thresh)
+     st.session_state.text_emotions = chosen
+     if scores:
+         st.json({k: round(v, 4) for k, v in sorted(scores.items(), key=lambda x: x[1], reverse=True)})
+     if chosen:
+         st.success(f"Predicted (≥{thresh:.2f}): {', '.join(chosen)}")
+
+ # ---- Multimodal Fusion ----
+ st.markdown("---")
+ st.subheader("Multimodal Fusion")
+ st.write("Video 0.5, Voice 0.3, Text 0.2")
+
+ def fuse(video_emotion, voice_emotion, text_emotions):
+     w = {"video": 0.5, "voice": 0.3, "text": 0.2}
+     s = {}
+     if video_emotion:
+         s[video_emotion] = s.get(video_emotion, 0) + w["video"]
+     if voice_emotion:
+         s[voice_emotion] = s.get(voice_emotion, 0) + w["voice"]
+     if text_emotions:
+         share = w["text"] / max(1, len(text_emotions))
+         for t in text_emotions:
+             s[t] = s.get(t, 0) + share
+     return s
+
+ if st.button("Fuse now"):
+     breakdown = fuse(st.session_state.get("video_emotion"), st.session_state.get("voice_emotion"), st.session_state.get("text_emotions", []))
+     if breakdown:
+         dom = max(breakdown, key=breakdown.get)
+         st.success(f"Dominant emotion: {dom}")
+         st.json({k: round(v, 3) for k, v in breakdown.items()})
+     else:
+         st.warning("No modalities available yet.")
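
The fusion weighting is easiest to see in isolation. The snippet below copies the fuse() logic from model.py and runs it on made-up labels, purely to illustrate how the 0.5/0.3/0.2 weights combine; the example inputs are not from the app.

# Standalone illustration of the fusion weights (example inputs are made up).
def fuse(video_emotion, voice_emotion, text_emotions):
    w = {"video": 0.5, "voice": 0.3, "text": 0.2}
    s = {}
    if video_emotion:
        s[video_emotion] = s.get(video_emotion, 0) + w["video"]
    if voice_emotion:
        s[voice_emotion] = s.get(voice_emotion, 0) + w["voice"]
    if text_emotions:
        share = w["text"] / max(1, len(text_emotions))
        for t in text_emotions:
            s[t] = s.get(t, 0) + share
    return s

scores = fuse("happy", "neutral", ["happy", "excitement"])
print({k: round(v, 3) for k, v in scores.items()})
# {'happy': 0.6, 'neutral': 0.3, 'excitement': 0.1}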
requirements.txt ADDED
@@ -0,0 +1,27 @@
+ # Core
+ flask>=3.0.0
+ streamlit>=1.36.0
+
+ # ML / CV / Audio
+ torch>=2.1.0
+ torchaudio>=2.1.0
+ transformers>=4.42.0
+ datasets>=2.20.0
+ evaluate>=0.4.2
+ numpy>=1.25.0
+ opencv-python>=4.9.0.80
+ ultralytics>=8.2.0
+ supervision>=0.18.0
+ streamlit-webrtc
+
+ # Whisper + recording
+ openai-whisper>=20231117
+ sounddevice>=0.4.6
+
+ # Utilities
+ tqdm>=4.66.0
+
+ # Misc
+ hf_xet
+ scikit-learn
+ gunicorn
train_bert.py ADDED
@@ -0,0 +1,73 @@
+ from datasets import load_dataset
+ from transformers import BertTokenizer, BertForSequenceClassification, Trainer, TrainingArguments
+ import numpy as np
+ import torch
+ from sklearn.metrics import accuracy_score, f1_score
+
+ print("📥 Loading GoEmotions dataset...")
+ dataset = load_dataset("go_emotions", "simplified")
+
+ model_name = "bert-base-uncased"
+ tokenizer = BertTokenizer.from_pretrained(model_name)
+ num_labels = dataset["train"].features["labels"].feature.num_classes
+ print(f"✅ Classes: {num_labels}")
+
+ def tokenize_and_encode(batch):
+     enc = tokenizer(batch["text"], padding="max_length", truncation=True, max_length=128)
+     labels = []
+     for labs in batch["labels"]:
+         # BCEWithLogitsLoss (multi-label) expects float targets, not ints
+         vec = [0.0] * num_labels
+         for l in labs:
+             vec[l] = 1.0
+         labels.append(vec)
+     enc["labels"] = labels
+     return enc
+
+ encoded = dataset.map(tokenize_and_encode, batched=True)
+ encoded.set_format("torch", columns=["input_ids", "attention_mask", "labels"])
+
+ model = BertForSequenceClassification.from_pretrained(
+     model_name, num_labels=num_labels, problem_type="multi_label_classification"
+ )
+
+ def compute_metrics(eval_pred):
+     logits, labels = eval_pred
+     preds = (logits > 0).astype(int)  # threshold logits at 0 (sigmoid 0.5) for BCEWithLogits
+     labels = labels.astype(int)
+     # sklearn metrics accept multi-label indicator matrices directly
+     return {
+         "accuracy": accuracy_score(labels, preds),
+         "f1_micro": f1_score(labels, preds, average="micro", zero_division=0),
+         "f1_macro": f1_score(labels, preds, average="macro", zero_division=0),
+     }
+
+ args = TrainingArguments(
+     output_dir="bert_emotion",
+     eval_strategy="epoch",
+     save_strategy="epoch",
+     learning_rate=2e-5,
+     per_device_train_batch_size=16,
+     per_device_eval_batch_size=16,
+     num_train_epochs=3,
+     weight_decay=0.01,
+     logging_dir="./logs",
+     logging_steps=100,
+     load_best_model_at_end=True,
+     metric_for_best_model="f1_micro"
+ )
+
+ trainer = Trainer(
+     model=model,
+     args=args,
+     train_dataset=encoded["train"],
+     eval_dataset=encoded["validation"],
+     tokenizer=tokenizer,
+     compute_metrics=compute_metrics
+ )
+
+ print("🚀 Training...")
+ trainer.train()
+ model.save_pretrained("./bert_emotion")
+ tokenizer.save_pretrained("./bert_emotion")
+ print("✅ Saved fine-tuned model to ./bert_emotion")
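
Once training finishes, the saved checkpoint can be queried the same way model.py queries the pretrained GoEmotions model. A hedged inference sketch follows; note that the label names come from whatever id2label the fine-tuned config carries (this script does not set custom label names), and the input sentence is just an example.

# Illustrative multi-label inference against the checkpoint saved above.
import torch
from transformers import BertTokenizer, BertForSequenceClassification

tok = BertTokenizer.from_pretrained("./bert_emotion")
mdl = BertForSequenceClassification.from_pretrained("./bert_emotion")
mdl.eval()

enc = tok("I can't believe how well this turned out!", return_tensors="pt", truncation=True)
with torch.no_grad():
    probs = torch.sigmoid(mdl(**enc).logits)[0]

# Keep every label whose sigmoid score clears 0.3, mirroring model.py's default threshold.
chosen = [mdl.config.id2label[i] for i, p in enumerate(probs) if p >= 0.3]
print(chosen or ["(no label above threshold)"])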
voice_det.py ADDED
@@ -0,0 +1,22 @@
+ import whisper
+ from transformers import pipeline
+
+ class Voice_Analysis:
+     def __init__(self, emotion_model="prithivMLmods/Speech-Emotion-Classification", whisper_size="base"):
+         # HF pipeline for speech emotion
+         self.classifier = pipeline(
+             "audio-classification",
+             model=emotion_model,
+             feature_extractor=emotion_model
+         )
+         # Whisper for ASR
+         self.modelwa = whisper.load_model(whisper_size)
+
+     def detect(self, path):
+         """Run emotion classification on an audio file. Returns list of dicts with label/score."""
+         return self.classifier(path)
+
+     def subtitles(self, path):
+         """Transcribe audio to text using Whisper."""
+         result = self.modelwa.transcribe(path)
+         return result.get("text", "").strip()
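
Usage is a one-liner per modality. The sketch below is illustrative only: "clip.wav" is a placeholder for any short audio file, and both the Hugging Face emotion model and the Whisper weights are downloaded on first use.

# Illustrative usage -- not part of the commit.
from voice_det import Voice_Analysis

va = Voice_Analysis()                  # loads the HF emotion model and Whisper "base"
emotions = va.detect("clip.wav")       # list of {"label": ..., "score": ...} dicts
transcript = va.subtitles("clip.wav")  # plain-text transcription

print("Top emotion:", max(emotions, key=lambda r: r["score"])["label"])
print("Transcript:", transcript)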