sangambhamare committed on
Commit ae46b97 · verified · 1 Parent(s): a946755

Update app.py

Files changed (1):
  1. app.py  +17 -5
app.py CHANGED
@@ -5,20 +5,25 @@ import joblib
 import gradio as gr
 from huggingface_hub import hf_hub_download
 
-# Download the model from Hugging Face Hub
+# --- Load model from Hugging Face Hub ---
 MODEL_REPO = "sangambhamare/TruthDetection"
 MODEL_FILENAME = "model.joblib"
-
 model_path = hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILENAME)
 model = joblib.load(model_path)
 
-# MFCC extraction
+# --- Load interactive report HTML (must be in same directory) ---
+report_html = ""
+if os.path.exists("interactive_report.html"):
+    with open("interactive_report.html", "r", encoding="utf-8") as f:
+        report_html = f.read()
+
+# --- MFCC feature extraction ---
 def extract_mfcc(file_path):
     y, sr = librosa.load(file_path, sr=None)
     mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
     return np.mean(mfcc, axis=1)
 
-# Prediction
+# --- Prediction function ---
 def predict_audio(audio_file):
     try:
         features = extract_mfcc(audio_file).reshape(1, -1)
@@ -27,7 +32,7 @@ def predict_audio(audio_file):
     except Exception as e:
         return f"Error: {e}"
 
-# Gradio UI
+# --- Gradio Interface ---
 with gr.Blocks() as demo:
     gr.Markdown("<h1 style='text-align: center;'>Truth Detection from Audio Stories</h1>")
     gr.Markdown(
@@ -36,10 +41,17 @@ with gr.Blocks() as demo:
         "based on MFCC features and a trained Random Forest classifier."
         "</p>"
     )
+
     audio_input = gr.Audio(type="filepath", label="Upload WAV Audio File (30 seconds)")
     output = gr.Textbox(label="Prediction")
     submit_btn = gr.Button("Predict")
     submit_btn.click(fn=predict_audio, inputs=audio_input, outputs=output)
+
+    if report_html:
+        gr.Markdown("<hr>")
+        gr.Markdown("<h3 style='text-align: center;'>Interactive Report</h3>")
+        gr.HTML(value=report_html)
+
     gr.Markdown("<p style='text-align: center; font-size: 12px; color: gray;'>Developed by Sangam Sanjay Bhamare, 2025.</p>")
 
 if __name__ == "__main__":
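
Two notes on context the hunks themselves do not show: the new os.path.exists(...) check presumes an "import os" near the top of app.py, and the diff cuts off right after "if __name__ == "__main__":", where a demo.launch() call presumably follows. For quick verification outside the Space, the committed pipeline can be mirrored by a small standalone script. The sketch below is illustrative only: the script name, the WAV path, and the scikit-learn-style model.predict(...) call (the actual prediction lines sit outside these hunks) are assumptions, not part of the commit.

# quick_check.py -- illustrative standalone sketch of the app.py pipeline (not part of the commit).
# Assumptions: the model repo/file named in the diff are reachable, the input is a local
# ~30-second WAV, and the classifier exposes a scikit-learn-style predict() method.
import sys

import joblib
import librosa
import numpy as np
from huggingface_hub import hf_hub_download

MODEL_REPO = "sangambhamare/TruthDetection"
MODEL_FILENAME = "model.joblib"


def extract_mfcc(file_path):
    # Same features as app.py: 13 MFCCs at the file's native sample rate, mean-pooled over time.
    y, sr = librosa.load(file_path, sr=None)
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
    return np.mean(mfcc, axis=1)


if __name__ == "__main__":
    model_path = hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILENAME)
    model = joblib.load(model_path)
    features = extract_mfcc(sys.argv[1]).reshape(1, -1)
    print("Prediction:", model.predict(features)[0])  # predict() is an assumed sklearn-style API

Run as: python quick_check.py path/to/story.wav (file name and path are placeholders).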