import gradio as gr
import requests
import os
from dotenv import load_dotenv
from io import BytesIO
from PIL import Image
import PyPDF2
from pdf2image import convert_from_path
import tempfile
import sqlite3
from datetime import datetime

# Load environment variables
load_dotenv()
SERPAPI_KEY = os.getenv("SERPAPI_KEY")
HYPERBOLIC_API_KEY = os.getenv("HYPERBOLIC_API_KEY")
ELEVENLABS_API_KEY = os.getenv("ELEVENLABS_API_KEY")

ADMIN_PASSWORD = "BT54iv!@"
DB_PATH = "students.db"

# --- DATABASE FUNCTIONS ---
def init_database():
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS students (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT NOT NULL,
            medical_school TEXT NOT NULL,
            year TEXT NOT NULL,
            registration_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        )
    """)
    conn.commit()
    conn.close()

def save_student(name, medical_school, year):
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute(
            "INSERT INTO students (name, medical_school, year) VALUES (?, ?, ?)",
            (name, medical_school, year)
        )
        conn.commit()
        conn.close()
        return True
    except Exception as e:
        print(f"Error saving student: {e}")
        return False

def get_all_students():
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute(
            "SELECT id, name, medical_school, year, registration_date "
            "FROM students ORDER BY registration_date DESC"
        )
        rows = cursor.fetchall()
        conn.close()
        return rows
    except Exception:
        return []

init_database()

# --- API CONFIGURATION ---
HYPERBOLIC_API_URL = "https://api.hyperbolic.xyz/v1/chat/completions"
HYPERBOLIC_MODEL = "meta-llama/Llama-3.3-70B-Instruct"
ELEVENLABS_API_URL = "https://api.elevenlabs.io/v1/text-to-speech"
ELEVENLABS_VOICE_ID = "nPczCjzI2devNBz1zQrb"

# --- LOGIC FUNCTIONS ---
def generate_audio(text: str, student_name: str = None) -> str:
    """Convert question text to speech via ElevenLabs; returns a temp MP3 path or None."""
    if not ELEVENLABS_API_KEY or not text:
        return None
    if student_name:
        text = f"Welcome to Viva, Doctor {student_name}, let's start. {text}"
    try:
        url = f"{ELEVENLABS_API_URL}/{ELEVENLABS_VOICE_ID}"
        headers = {"Accept": "audio/mpeg", "Content-Type": "application/json", "xi-api-key": ELEVENLABS_API_KEY}
        data = {"text": text, "model_id": "eleven_turbo_v2", "voice_settings": {"stability": 0.5, "similarity_boost": 0.5}}
        response = requests.post(url, json=data, headers=headers, timeout=60)
        if response.status_code == 200:
            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as f:
                f.write(response.content)
                return f.name
        return None
    except Exception as e:
        print(f"Error generating audio: {e}")
        return None

def is_anatomy_related(query: str) -> tuple[bool, str]:
    """Ask the LLM whether the query is about human anatomy; fail open on errors."""
    prompt = f"Is this question related to human anatomy ONLY? '{query}'. Respond YES or NO."
    try:
        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {HYPERBOLIC_API_KEY}"}
        payload = {"model": HYPERBOLIC_MODEL, "messages": [{"role": "user", "content": prompt}], "max_tokens": 10}
        response = requests.post(HYPERBOLIC_API_URL, headers=headers, json=payload, timeout=10)
        if "YES" in response.json()["choices"][0]["message"]["content"].upper():
            return True, ""
        return False, "⚠️ Please ask questions related to anatomy only."
    except Exception:
        return True, ""

def search_anatomy_image(query: str) -> tuple[list, str]:
    """Search Google Images via SerpAPI and return candidate diagram URLs."""
    try:
        params = {"engine": "google_images", "q": f"{query} anatomy diagram", "api_key": SERPAPI_KEY, "num": 5, "safe": "active"}
        data = requests.get("https://serpapi.com/search", params=params, timeout=20).json()
        if "images_results" in data:
            return [img["original"] for img in data["images_results"] if not img["original"].endswith(".svg")], ""
        return [], "No images found."
    except Exception as e:
        return [], str(e)
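# Note: SerpAPI's google_images engine lists hits under "images_results"; ".svg"
# URLs are filtered out above because Pillow cannot decode SVG data when the
# image is fetched later by download_image().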
def download_image(url):
    """Fetch an image URL into a PIL Image; returns None on any failure."""
    try:
        headers = {"User-Agent": "Mozilla/5.0"}
        return Image.open(BytesIO(requests.get(url, headers=headers, timeout=10).content))
    except Exception:
        return None

def generate_anatomy_info(query: str) -> str:
    try:
        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {HYPERBOLIC_API_KEY}"}
        prompt = f"Provide a detailed anatomy summary for medical students about: {query}. Use emojis for sections."
        payload = {"model": HYPERBOLIC_MODEL, "messages": [{"role": "user", "content": prompt}], "max_tokens": 600}
        return requests.post(HYPERBOLIC_API_URL, headers=headers, json=payload, timeout=60).json()["choices"][0]["message"]["content"]
    except Exception as e:
        return f"Error: {e}"

def generate_viva_questions(topic: str) -> list:
    """Generate up to five VIVA questions, parsing the Q/HINT/ANSWER format from the LLM."""
    try:
        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {HYPERBOLIC_API_KEY}"}
        prompt = f"Generate 5 hard anatomy VIVA questions on: {topic}. Format: Q1: ... HINT: ... ANSWER: ..."
        payload = {"model": HYPERBOLIC_MODEL, "messages": [{"role": "user", "content": prompt}], "max_tokens": 800}
        content = requests.post(HYPERBOLIC_API_URL, headers=headers, json=payload, timeout=60).json()["choices"][0]["message"]["content"]
        questions = []
        current = {}
        for line in content.split('\n'):
            line = line.strip()
            if line.startswith('Q') and ':' in line:
                if current:
                    questions.append(current)
                current = {'question': line.split(':', 1)[1].strip()}
            elif line.startswith('HINT:'):
                current['hint'] = line.split(':', 1)[1].strip()
            elif line.startswith('ANSWER:'):
                current['answer'] = line.split(':', 1)[1].strip()
        if current:
            questions.append(current)
        return questions[:5]
    except Exception:
        return []

def evaluate_viva_answer(question, student_ans, expected):
    if not student_ans.strip():
        return "Please provide an answer.", "⏸️"
    try:
        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {HYPERBOLIC_API_KEY}"}
        prompt = f"Evaluate this VIVA answer. Question: {question}. Student: {student_ans}. Expected: {expected}. Give feedback and score."
        payload = {"model": HYPERBOLIC_MODEL, "messages": [{"role": "user", "content": prompt}], "max_tokens": 400}
        feedback = requests.post(HYPERBOLIC_API_URL, headers=headers, json=payload, timeout=60).json()["choices"][0]["message"]["content"]
        emoji = "🏆" if "DISTINCTION" in feedback.upper() else "✅" if "PASS" in feedback.upper() else "⚠️"
        return f"{emoji} **Feedback:**\n\n{feedback}\n\n📚 **Ref:** {expected}", emoji
    except Exception:
        return "Error evaluating.", "⚠️"

def process_anatomy_query(query):
    if not query.strip():
        return None, "", "Enter a question."
    valid, msg = is_anatomy_related(query)
    if not valid:
        return None, "", msg
    urls, err = search_anatomy_image(query)
    info = generate_anatomy_info(query)
    img = None
    for url in urls:
        img = download_image(url)
        if img:
            break
    return img, f"## 📚 Key Learning Points\n\n{info}", err

def process_uploaded_book(pdf_file):
    if not pdf_file:
        return [], "No file uploaded."
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp:
            tmp.write(pdf_file)
            tmp_path = tmp.name
        images = convert_from_path(tmp_path, dpi=150)
        reader = PyPDF2.PdfReader(tmp_path)
        data = []
        for i, img in enumerate(images):
            # extract_text() can return None on image-only pages
            txt = (reader.pages[i].extract_text() or "")[:2000] if i < len(reader.pages) else ""
            data.append((img, f"Page {i+1}", txt))
        os.unlink(tmp_path)
        return data, f"Processed {len(data)} pages."
    except Exception as e:
        return [], f"Error: {e}"
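# Note: convert_from_path (pdf2image) depends on the external poppler utilities
# (pdftoppm); if they are not installed, every PDF upload ends up in the error
# branch above.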
def analyze_book_image(image, page_info, page_text):
    try:
        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {HYPERBOLIC_API_KEY}"}
        prompt = f"Analyze this anatomy book page ({page_info}): {page_text}. Give summary, clinical points, and 15 study questions."
        payload = {"model": HYPERBOLIC_MODEL, "messages": [{"role": "user", "content": prompt}], "max_tokens": 1000}
        return requests.post(HYPERBOLIC_API_URL, headers=headers, json=payload, timeout=60).json()["choices"][0]["message"]["content"]
    except Exception as e:
        return f"Error: {e}"

def start_viva_mode(topic, image, name=""):
    if not topic:
        return [gr.update()] * 11
    qs = generate_viva_questions(topic)
    if not qs:
        return [gr.update()] * 11
    audio = generate_audio(qs[0]['question'], name)
    return (
        gr.update(visible=True),                      # Container
        f"**VIVA ACTIVE:** {topic}",                  # Status
        image,                                        # Image
        f"### Q1: {qs[0]['question']}",               # Q Display
        f"💡 {qs[0].get('hint', '')}",                # Hint
        "", "",                                       # Answer, Feedback
        gr.update(interactive=True, value="Submit"),  # Button
        qs,
        audio,
        name,
    )

def submit_viva_answer_logic(ans, qs, idx, name):
    if idx >= len(qs):
        return "Done", "", "", "", gr.update(interactive=False), idx, None
    fb, _ = evaluate_viva_answer(qs[idx]['question'], ans, qs[idx].get('answer', ''))
    next_idx = idx + 1
    if next_idx < len(qs):
        nxt = qs[next_idx]
        audio = generate_audio(nxt['question'], name)
        return f"### Q{next_idx+1}: {nxt['question']}", f"💡 {nxt.get('hint', '')}", "", fb, gr.update(), next_idx, audio
    else:
        return "### 🎉 VIVA Complete!", "", "", fb, gr.update(interactive=False, value="Done"), next_idx, None

# --- GRADIO UI ---
CSS = """
/* HIDE DEFAULT TABS HEADER to use Custom Nav */
.tabs > .tab-nav { display: none !important; }
#nav_bar { display: flex; gap: 10px; overflow-x: auto; padding-bottom: 5px; }
#nav_bar button { flex: 1; white-space: nowrap; }
"""

with gr.Blocks(title="AnatomyBot", css=CSS) as demo:
    # State
    student_name = gr.State("")
    viva_qs = gr.State([])
    q_idx = gr.State(0)
    cur_topic = gr.State("")
    cur_img = gr.State(None)
    cur_book_topic = gr.State("")

    # Main App
    with gr.Column():
        gr.Markdown("# 🩺 AnatomyBot - MBBS Tutor")

        # Custom Nav
        with gr.Row(elem_id="nav_bar"):
            btn_learn = gr.Button("📚 Learning Mode", variant="primary")
            btn_viva = gr.Button("🎯 VIVA Training", variant="secondary")
            btn_book = gr.Button("📖 Book Mode", variant="secondary")

        # TABS Container (This solves the overlapping issue)
        with gr.Tabs(elem_id="main_tabs") as tabs:
            # TAB 1: LEARNING
            with gr.TabItem("Learning", id="tab_learn"):
                with gr.Row():
                    q_in = gr.Textbox(label="Anatomy Question", placeholder="e.g. Circle of Willis")
Circle of Willis") # Clickable Examples gr.Examples( examples=[ ["Show me the Circle of Willis"], ["Brachial plexus anatomy"], ["Carpal bones arrangement"], ["Layers of the scalp"], ["Anatomy of the heart chambers"], ["Cranial nerves and their functions"], ["Structure of the kidney nephron"], ["Branches of the abdominal aorta"], ["Rotator cuff muscles"], ["Spinal cord cross section"], ["Femoral triangle anatomy"], ["Larynx cartilages and membranes"], ["Portal venous system"], ["Anatomy of the eyeball"], ["Bronchopulmonary segments"] ], inputs=q_in ) with gr.Row(): b_search = gr.Button("π Search", variant="primary") b_to_viva = gr.Button("π― Start VIVA on this Topic", variant="secondary") err_out = gr.Markdown() with gr.Row(): info_out = gr.Markdown() img_out = gr.Image(type="pil", label="Diagram") # TAB 2: VIVA with gr.TabItem("VIVA", id="tab_viva"): v_status = gr.Markdown("Select a topic in Learning Mode or Book Mode first!") with gr.Column(visible=False) as v_cont: with gr.Row(): v_img = gr.Image(interactive=False, type="pil", label="Reference") with gr.Column(): v_q_disp = gr.Markdown("Question...") v_hint = gr.Markdown("Hint...") v_audio = gr.Audio(autoplay=True, interactive=False) v_ans = gr.Textbox(label="Answer", lines=3) v_sub = gr.Button("Submit") v_fb = gr.Markdown() # TAB 3: BOOK with gr.TabItem("Book", id="tab_book"): bk_file = gr.File(label="Upload PDF", file_types=[".pdf"], type="binary") bk_stat = gr.Markdown() # Book State bk_imgs = gr.State([]) bk_caps = gr.State([]) bk_txts = gr.State([]) bk_page_sel = gr.Dropdown(label="Select Page", interactive=False) bk_view = gr.Image(label="Page View", type="pil") bk_anl = gr.Markdown() b_bk_viva = gr.Button("π― Start VIVA from Page", visible=False) # Visitor Counter at the bottom gr.HTML("""
""") # --- EVENT HANDLERS --- # 2. Navigation (Updates the Tabs 'selected' state) btn_learn.click(lambda: gr.Tabs(selected="tab_learn"), outputs=tabs) btn_viva.click(lambda: gr.Tabs(selected="tab_viva"), outputs=tabs) btn_book.click(lambda: gr.Tabs(selected="tab_book"), outputs=tabs) # 3. Learning Mode def run_search(q): img, txt, err = process_anatomy_query(q) return img, txt, err, q, img, gr.update(interactive=True) b_search.click(run_search, q_in, [img_out, info_out, err_out, cur_topic, cur_img, b_to_viva]) q_in.submit(run_search, q_in, [img_out, info_out, err_out, cur_topic, cur_img, b_to_viva]) # 4. Start VIVA (Learning Mode) b_to_viva.click( fn=lambda: gr.update(value="Generating...", interactive=False), outputs=b_to_viva ).then( fn=start_viva_mode, inputs=[cur_topic, cur_img, student_name], outputs=[v_cont, v_status, v_img, v_q_disp, v_hint, v_ans, v_fb, v_sub, viva_qs, v_audio, gr.State()] ).then( fn=lambda: (gr.Tabs(selected="tab_viva"), 0, gr.update(value="Start VIVA", interactive=True)), outputs=[tabs, q_idx, b_to_viva] ) # 5. VIVA Logic v_sub.click( fn=submit_viva_answer_logic, inputs=[v_ans, viva_qs, q_idx, student_name], outputs=[v_q_disp, v_hint, v_ans, v_fb, v_sub, q_idx, v_audio] ) # 6. Book Mode def on_upload(f): data, msg = process_uploaded_book(f) if not data: return [], [], [], gr.update(choices=[]), msg imgs, caps, txts = zip(*data) return imgs, caps, txts, gr.update(choices=list(caps), interactive=True), msg bk_file.upload(on_upload, bk_file, [bk_imgs, bk_caps, bk_txts, bk_page_sel, bk_stat]) def on_page_sel(sel, imgs, caps, txts): if not sel: return None, "", "", gr.update() idx = caps.index(sel) anl = analyze_book_image(imgs[idx], sel, txts[idx]) return imgs[idx], anl, f"Textbook: {sel}", gr.update(visible=True) bk_page_sel.change(on_page_sel, [bk_page_sel, bk_imgs, bk_caps, bk_txts], [bk_view, bk_anl, cur_book_topic, b_bk_viva]) # 7. Start VIVA (Book Mode) b_bk_viva.click( fn=start_viva_mode, inputs=[cur_book_topic, bk_view, student_name], outputs=[v_cont, v_status, v_img, v_q_disp, v_hint, v_ans, v_fb, v_sub, viva_qs, v_audio, gr.State()] ).then( fn=lambda: (gr.Tabs(selected="tab_viva"), 0), outputs=[tabs, q_idx] ) if __name__ == "__main__": demo.launch(server_name="0.0.0.0", server_port=7860)