# Step 1: Import Libraries import gradio as gr from sentence_transformers import SentenceTransformer import faiss import numpy as np import google.generativeai as genai import pickle import os # Step 2: Configure API Key from Hugging Face Secrets GOOGLE_API_KEY = None api_configured = False try: GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY') if GOOGLE_API_KEY is None: print("WARNING: GOOGLE_API_KEY not found in environment variables.") print("Please add it to your Hugging Face Space secrets.") else: genai.configure(api_key=GOOGLE_API_KEY) api_configured = True print("✅ API Key configured successfully.") except Exception as e: print(f"ERROR: Could not configure API key. Details: {e}") api_configured = False # Step 3: Define Data Path DATA_PATH = "data" vector_store_file = os.path.join(DATA_PATH, "vector_store.index") data_file = os.path.join(DATA_PATH, "data.pkl") # Step 4: Initialize variables vector_store_data = None embedding_model = None llm = None texts = [] sources = [] index = None # Step 4: Load Models and Pre-processed Data if os.path.exists(vector_store_file) and os.path.exists(data_file): try: print(f"Loading pre-processed data from the '{DATA_PATH}' directory...") # Load FAISS index index = faiss.read_index(vector_store_file) # Load texts and sources with open(data_file, "rb") as f: stored_data = pickle.load(f) texts = stored_data.get("texts", []) sources = stored_data.get("sources", []) print(f"✅ Data loaded successfully. Found {len(texts)} text chunks.") # Load embedding model print("Loading embedding model...") embedding_model = SentenceTransformer('BAAI/bge-large-en-v1.5') print("✅ Embedding model loaded successfully.") # Load LLM if API is configured if api_configured: print("Loading Gemini model...") llm = genai.GenerativeModel('gemini-1.5-flash-latest') print("✅ Gemini model loaded successfully.") vector_store_data = (index, texts, sources) else: print("⚠️ Gemini model not loaded due to missing API key.") except Exception as e: print(f"❌ ERROR: An error occurred during data or model loading: {e}") import traceback traceback.print_exc() vector_store_data = None else: print(f"❌ ERROR: Pre-processed data not found in the '{DATA_PATH}' directory.") print(f"Please make sure '{vector_store_file}' and '{data_file}' exist.") if not os.path.exists(DATA_PATH): print(f"The '{DATA_PATH}' directory does not exist.") else: print(f"Contents of '{DATA_PATH}' directory: {os.listdir(DATA_PATH) if os.path.exists(DATA_PATH) else 'N/A'}") # Step 5: RAG and Chat Functions (with Updated System Prompt) def get_relevant_context(query, index, texts, sources, top_k=5): """Retrieve relevant context from the vector store.""" if embedding_model is None: return [] try: query_embedding = embedding_model.encode([query]) distances, indices = index.search(query_embedding, top_k) context = [] for i in indices[0]: if 0 <= i < len(texts): # Ensure index is valid context.append({"text": texts[i], "source": sources[i]}) return context except Exception as e: print(f"Error in get_relevant_context: {e}") return [] def chat_with_rag(message, history, vector_store_data): """Generate response using RAG.""" if vector_store_data is None: return "❌ The AI system is not properly initialized. Please check the configuration." if not api_configured or llm is None: return "❌ The Google API key is not configured. Please add GOOGLE_API_KEY to your Hugging Face Space secrets." try: index, texts, sources = vector_store_data # Get relevant context relevant_context = get_relevant_context(message, index, texts, sources) if not relevant_context: return "I couldn't find relevant information in the Halassa Lab's papers to answer your question. Could you try rephrasing or asking about a different aspect of the lab's research?" context_str = "\n\n".join([f"Source: {c['source']}\nContent: {c['text']}" for c in relevant_context]) # System prompt prompt = f""" You are a friendly and engaging science communicator for the Halassa Lab at MIT. Your goal is to explain the lab's complex computational neuroscience research to a general audience that has little to no scientific background. Follow these rules strictly: 1. **Simplify, Don't Dumb Down:** Break down complex topics into simple, easy-to-understand concepts. Use analogies and real-world examples (e.g., "think of the thalamus as a busy switchboard operator for the brain"). 2. **Be Engaging and Accessible:** Write in a clear, conversational, and friendly tone. Avoid jargon at all costs. If you must use a technical term, explain it immediately in simple terms. 3. **Synthesize and Explain:** Do not just copy-paste from the provided research papers. Read the relevant context and formulate a comprehensive, well-written answer in your own words. 4. **Cite Your Sources:** At the end of your response, if you used information from the provided papers, you MUST include a "Sources:" list. Use the format [filename - Page X]. This adds credibility. 5. **Stay Focused:** Only answer questions related to the Halassa Lab's work based on the provided context. If the context doesn't contain the answer, state that the information isn't available in the provided documents. Context from the Halassa Lab's papers: --- {context_str} --- User Question: {message} Your Friendly Explanation: """ # Generate response response = llm.generate_content(prompt) return response.text except Exception as e: error_msg = f"An error occurred while generating the response: {str(e)}" print(error_msg) import traceback traceback.print_exc() return f"❌ {error_msg}" # Step 6: Polished Gradio User Interface # Define a professional, clean theme for a science lab theme = gr.themes.Monochrome( primary_hue="indigo", secondary_hue="blue", neutral_hue="slate", font=[gr.themes.GoogleFont("Space Mono"), "ui-sans-serif", "system-ui", "sans-serif"], ).set( body_background_fill="*neutral_50", block_background_fill="*neutral_100", block_border_width="1px", block_shadow="*shadow_md", button_shadow="*shadow_md", button_primary_background_fill="*primary_500", button_primary_background_fill_hover="*primary_600", button_primary_text_color="white", ) # List of example questions for users to click on example_questions = [ "What is the main focus of the Halassa Lab's research, in simple terms?", "How does the thalamus affect our attention? Can you use an analogy?", "What have you learned about sleep and how it affects the brain?", "Explain the role of the thalamic reticular nucleus like I'm a high school student.", ] # Create the Gradio interface def create_demo(): """Create the Gradio demo interface.""" with gr.Blocks(theme=theme, title="Halassa Lab AI Explainer") as demo: with gr.Column(): # --- Header Section --- try: gr.Image( "https://d39w22sdwnt1s2.cloudfront.net/wp-content/uploads/2023/10/Halassa-M-2023-Option-2-1200x675.jpg", show_label=False, show_download_button=False, container=False ) except: # If image fails to load, continue without it pass gr.Markdown("# Hello! 👋") gr.Markdown("Ask me anything about the work we do in the Halassa Lab!") # Show status if vector_store_data is not None and api_configured: gr.Markdown("✅ **System Status:** Ready to answer your questions!") elif vector_store_data is not None and not api_configured: gr.Markdown("⚠️ **System Status:** Data loaded but Google API key is missing. Please configure it in Space secrets.") else: gr.Markdown("❌ **System Status:** System not properly initialized. Check logs for details.") # --- Chatbot Interface --- chatbot_ui = gr.Chatbot( label="Conversation", height=500, layout="panel", bubble_full_width=False ) message_box = gr.Textbox( label="Ask your question here...", lines=3, placeholder="e.g., How does the brain filter out distractions?" ) with gr.Row(): submit_button = gr.Button("Submit", variant="primary", scale=2) clear_button = gr.ClearButton( components=[chatbot_ui, message_box], value="Clear Conversation", scale=1 ) # --- Example Questions Section --- gr.Examples( examples=example_questions, inputs=message_box, label="Click an example to get started:" ) # --- Logic to make the chatbot respond --- def respond(message, history): """Handle user messages and generate responses.""" if not message.strip(): return "", history response_text = chat_with_rag(message, history, vector_store_data) history.append((message, response_text)) return "", history # Connect event handlers submit_button.click( respond, inputs=[message_box, chatbot_ui], outputs=[message_box, chatbot_ui] ) message_box.submit( respond, inputs=[message_box, chatbot_ui], outputs=[message_box, chatbot_ui] ) return demo # Step 7: Launch the app if __name__ == "__main__": if vector_store_data is not None: print("Launching application with full functionality...") demo = create_demo() else: print("Launching application in error mode...") # Display a clear error message in the UI if data loading failed with gr.Blocks(theme=theme) as demo: gr.Markdown("## ⚠️ Application Error") gr.Markdown(""" Could not load the necessary data and models. **Possible issues:** 1. Missing data files (`vector_store.index` and `data.pkl` in the `data/` directory) 2. Missing or incorrect Google API key in Hugging Face Space secrets 3. Model loading errors Please check the Hugging Face Space logs for detailed error messages. """) # Show current status gr.Markdown("### Debug Information:") status_text = [] status_text.append(f"- Data directory exists: {os.path.exists(DATA_PATH)}") status_text.append(f"- Vector store file exists: {os.path.exists(vector_store_file)}") status_text.append(f"- Data pickle file exists: {os.path.exists(data_file)}") status_text.append(f"- Google API key configured: {api_configured}") if os.path.exists(DATA_PATH): files = os.listdir(DATA_PATH) status_text.append(f"- Files in data directory: {files if files else 'Empty directory'}") gr.Markdown("\n".join(status_text)) demo.launch()