🧠 OmniMind Orchestrator
The World's First Self-Evolving Multi-Agent MCP Ecosystem
Track 2 Submission - MCP's 1st Birthday Hackathon
"""
OmniMind Orchestrator - Main Gradio Application

The World's First Self-Evolving Multi-Agent MCP Ecosystem
Competition Entry for MCP's 1st Birthday Hackathon
Track 2: MCP in Action (Enterprise Category)

Sponsor Integrations:
- Google Gemini: Multi-model routing with Gemini 2.0 Flash
- Modal: Dynamic MCP deployment
- LlamaIndex: Enterprise knowledge RAG
- ElevenLabs: Voice-first interface
- Blaxel: Agent visualization
"""

import os
import sys
import asyncio
import json
from pathlib import Path
from typing import Dict, Any, List, Optional, Tuple, AsyncGenerator
from datetime import datetime

import gradio as gr
import plotly.graph_objects as go
import networkx as nx

# Add project root to path so the local packages below resolve when this
# file is run directly as a script.
sys.path.insert(0, str(Path(__file__).parent))

from core.model_router import router, TaskType
from mcp_gen.generator import generator
from deployments.modal_deployer import deployer
from core.knowledge_engine import knowledge
from ui.voice_interface import voice

# Load environment variables
from dotenv import load_dotenv
load_dotenv()


# ============================================================================
# Agent Visualization (Blaxel Integration)
# ============================================================================

def create_agent_graph(agent_state: Dict[str, Any]) -> go.Figure:
    """
    Create a real-time agent decision graph using Plotly.

    Args:
        agent_state: Dict with optional keys:
            - "nodes": list of {"id", "label", "type"?} dicts
            - "edges": list of {"from", "to", "label"?} dicts

    Returns:
        A Plotly figure rendering the directed agent graph with nodes
        colored by their pipeline stage.

    Prize Integration: Blaxel Choice Award ($2,500)
    """
    G = nx.DiGraph()

    # Build graph from agent state
    for node in agent_state.get("nodes", []):
        G.add_node(node["id"], label=node["label"], type=node.get("type", "default"))
    for edge in agent_state.get("edges", []):
        G.add_edge(edge["from"], edge["to"], label=edge.get("label", ""))

    # Calculate layout. Seeded so the layout is deterministic: the graph is
    # re-rendered on every pipeline yield, and an unseeded spring layout
    # would make nodes jump to new positions each time.
    pos = nx.spring_layout(G, k=2, iterations=50, seed=42)

    # Create edge trace (None separators break the line between edges)
    edge_x = []
    edge_y = []
    for src, dst in G.edges():
        x0, y0 = pos[src]
        x1, y1 = pos[dst]
        edge_x.extend([x0, x1, None])
        edge_y.extend([y0, y1, None])

    edge_trace = go.Scatter(
        x=edge_x, y=edge_y,
        line=dict(width=2, color='#888'),
        hoverinfo='none',
        mode='lines'
    )

    # Create node trace, colored by pipeline stage
    node_x = []
    node_y = []
    node_text = []
    node_colors = []
    color_map = {
        "planning": "#3B82F6",    # Blue
        "generating": "#10B981",  # Green
        "deploying": "#F59E0B",   # Orange
        "executing": "#8B5CF6",   # Purple
        "completed": "#6B7280",   # Gray
    }
    for node in G.nodes():
        x, y = pos[node]
        node_x.append(x)
        node_y.append(y)
        node_text.append(G.nodes[node].get('label', node))
        node_type = G.nodes[node].get('type', 'default')
        node_colors.append(color_map.get(node_type, "#6B7280"))

    node_trace = go.Scatter(
        x=node_x, y=node_y,
        mode='markers+text',
        hoverinfo='text',
        text=node_text,
        textposition="top center",
        marker=dict(
            size=30,
            color=node_colors,
            line=dict(width=2, color='white')
        )
    )

    # Create figure (title previously contained mojibake "š§" for the 🧠 emoji)
    fig = go.Figure(
        data=[edge_trace, node_trace],
        layout=go.Layout(
            title=dict(text="🧠 Agent Decision Graph (Real-Time)", font=dict(size=16)),
            showlegend=False,
            hovermode='closest',
            margin=dict(b=0, l=0, r=0, t=40),
            xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
            yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
            plot_bgcolor='rgba(0,0,0,0)',
            height=400
        )
    )

    return fig


# ============================================================================
# Core Agent Orchestration
# ============================================================================
async def orchestrate_task(
    user_request: str,
    use_voice: bool = False,
    use_knowledge_base: bool = False
) -> AsyncGenerator[Tuple[str, Optional[go.Figure], Dict[str, Any]], None]:
    """
    Main orchestration function - the brain of OmniMind.

    This is where the magic happens:
    1. Analyze user request
    2. Decide if we need to generate new MCPs
    3. Generate code if needed
    4. Deploy to Modal
    5. Execute and return results

    Args:
        user_request: Free-text task description from the user.
        use_voice: When True (and a voice client is configured), a voice
            response step is announced after the final text response.
        use_knowledge_base: When True, the knowledge base is queried for
            context before generating an MCP.

    Yields:
        (status_markdown, agent_graph_figure, metadata) tuples as the
        pipeline progresses.
    """
    output = "# 🤖 OmniMind Orchestrator\n\n"
    output += f"**Request:** {user_request}\n\n"
    output += "---\n\n"

    agent_state = {
        "nodes": [{"id": "start", "label": "User Request", "type": "planning"}],
        "edges": []
    }
    yield (output, create_agent_graph(agent_state), {})

    # Step 1: Analyze request with multi-model router
    output += "## 🧠 Step 1: Analyzing Request\n\n"
    yield (output, create_agent_graph(agent_state), {})

    analysis_prompt = f"""Analyze this user request and determine what needs to be done:

Request: {user_request}

Determine:
1. Can this be done with existing general capabilities? (yes/no)
2. Do we need to generate a custom MCP server? (yes/no)
3. If yes, what should the MCP do?
4. What data sources or APIs are needed?

Respond in JSON:
{{
    "needs_custom_mcp": true/false,
    "mcp_description": "what the MCP should do",
    "complexity": "simple|medium|complex",
    "estimated_tools_needed": 2,
    "approach": "high-level approach to solve this"
}}
"""

    analysis = await router.generate(
        analysis_prompt,
        task_type=TaskType.PLANNING,
        temperature=0.3
    )

    try:
        analysis_data = json.loads(analysis["response"])
    except (json.JSONDecodeError, KeyError, TypeError):
        # Fallback when the model did not return valid JSON (or the router
        # response is malformed). Was a bare `except:` that could also hide
        # unrelated bugs such as KeyboardInterrupt.
        analysis_data = {
            "needs_custom_mcp": True,
            "mcp_description": user_request,
            "complexity": "medium",
            "estimated_tools_needed": 1,
            "approach": "Generate custom MCP for this task"
        }

    output += f"**Analysis:** {analysis_data['approach']}\n\n"
    output += f"**Needs Custom MCP:** {analysis_data['needs_custom_mcp']}\n\n"

    agent_state["nodes"].append({"id": "analyze", "label": "Analysis", "type": "completed"})
    agent_state["edges"].append({"from": "start", "to": "analyze"})
    yield (output, create_agent_graph(agent_state), analysis_data)

    # Step 2: Get knowledge context (if enabled)
    context = None
    if use_knowledge_base:
        output += "## 📚 Step 2: Querying Knowledge Base\n\n"
        agent_state["nodes"].append({"id": "knowledge", "label": "Knowledge", "type": "executing"})
        agent_state["edges"].append({"from": "analyze", "to": "knowledge"})
        yield (output, create_agent_graph(agent_state), {})

        context = await knowledge.get_context_for_mcp_generation(user_request)
        if context:
            output += f"**Found relevant context:** {context[:200]}...\n\n"
        else:
            output += "**No relevant context found**\n\n"
        agent_state["nodes"][-1]["type"] = "completed"
        yield (output, create_agent_graph(agent_state), {})

    # Step 3: Generate MCP (if needed)
    server_metadata = None
    if analysis_data.get("needs_custom_mcp", False):
        output += "## ⚙️ Step 3: Generating Custom MCP Server\n\n"
        agent_state["nodes"].append({"id": "generate", "label": "Generate MCP", "type": "generating"})
        agent_state["edges"].append({"from": "analyze", "to": "generate"})
        yield (output, create_agent_graph(agent_state), {})

        output += f"**Task:** {analysis_data['mcp_description']}\n\n"
        output += "🎨 Using Claude Sonnet for code generation...\n\n"

        server_metadata = await generator.generate_mcp_server(
            task_description=analysis_data["mcp_description"],
            context={"user_context": context} if context else None
        )

        output += f"✅ **Generated:** {server_metadata['server_name']}\n"
        output += f"**Tools:** {', '.join([t['name'] for t in server_metadata['tools']])}\n\n"
        agent_state["nodes"][-1]["type"] = "completed"
        yield (output, create_agent_graph(agent_state), server_metadata)

        # Step 4: Deploy to Modal. This must stay inside the custom-MCP
        # branch: deploying with server_metadata=None would fail, and the
        # "generate" node referenced by the edge only exists in this branch
        # (Step 5 below already selects its edge source on server_metadata).
        output += "## 🚀 Step 4: Deploying to Modal\n\n"
        agent_state["nodes"].append({"id": "deploy", "label": "Deploy", "type": "deploying"})
        agent_state["edges"].append({"from": "generate", "to": "deploy"})
        yield (output, create_agent_graph(agent_state), {})

        deployment = await deployer.deploy_mcp_server(server_metadata)

        if deployment.get("simulated"):
            output += "⚠️ **Simulated deployment** (configure MODAL_TOKEN for real deployment)\n"
        output += f"**URL:** {deployment['modal_url']}\n"
        output += f"**Status:** {deployment['status']}\n\n"
        agent_state["nodes"][-1]["type"] = "completed"
        yield (output, create_agent_graph(agent_state), deployment)

    # Step 5: Final response generation
    output += "## ✨ Step 5: Generating Response\n\n"
    agent_state["nodes"].append({"id": "respond", "label": "Response", "type": "executing"})
    if server_metadata:
        agent_state["edges"].append({"from": "deploy", "to": "respond"})
    else:
        agent_state["edges"].append({"from": "analyze", "to": "respond"})
    yield (output, create_agent_graph(agent_state), {})

    response_prompt = f"""Based on the work done, provide a clear, professional response to the user.

Original request: {user_request}

What was done:
{json.dumps(analysis_data, indent=2)}

{f"Generated MCP: {server_metadata['server_name']}" if server_metadata else "No custom MCP needed"}

Provide a helpful response explaining what was accomplished and how the user can use it.
"""

    final_response = await router.generate(
        response_prompt,
        task_type=TaskType.REASONING,
        temperature=0.7
    )

    output += final_response["response"] + "\n\n"
    agent_state["nodes"][-1]["type"] = "completed"
    yield (output, create_agent_graph(agent_state), {})

    # Voice output (if enabled)
    if use_voice and voice.client:
        output += "\n🔊 **Generating voice response...**\n"
        yield (output, create_agent_graph(agent_state), {})
        # Voice generation would happen here
        # For demo, we skip actual audio generation

    output += "\n---\n\n"
    output += "**Model Usage:**\n"
    stats = router.get_usage_stats()
    output += f"- Total Requests: {stats['total_requests']}\n"
    output += f"- Total Cost: ${stats['total_cost']}\n"
    output += f"- Claude: {stats['by_model']['claude']['requests']} requests\n"
    output += f"- Gemini: {stats['by_model']['gemini']['requests']} requests\n"
    output += f"- GPT-4: {stats['by_model']['gpt4']['requests']} requests\n"

    yield (output, create_agent_graph(agent_state), stats)
The World's First Self-Evolving Multi-Agent MCP Ecosystem
Track 2 Submission - MCP's 1st Birthday Hackathon