| """ | |
| Gradio + OpenAI MCP Connector — Clean, Fast, Streaming, With File Upload | |
| """ | |
| import os | |
| import shutil | |
| import gradio as gr | |
| from openai import OpenAI | |
# ---------------------
# CONFIGURATION
# ---------------------
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
MCP_SERVER_URL = "https://mcp-1st-birthday-auto-deployer.hf.space/gradio_api/mcp/"

MODEL_FAST = "gpt-5-mini"    # for tool resolution / MCP calls
MODEL_STREAM = "gpt-5.1"     # for final streaming reply

client = OpenAI(api_key=OPENAI_API_KEY)
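
# Assumes OPENAI_API_KEY is exported in the Space's environment, e.g.:
#   export OPENAI_API_KEY="sk-..."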
# ---------------------
# SYSTEM PROMPT
# ---------------------
SYSTEM_PROMPT = """
You are a fast MLOps automation assistant equipped with remote MCP tools
for dataset analysis, model training, evaluation, and deployment.

Rules:
- Use MCP tools when they directly address the user's request.
- Treat the uploaded CSV file URL as the source of truth. Never modify it.
- Never hallucinate tool names, arguments, or fields.
- Keep your internal reasoning hidden.
- Keep responses short, direct, and practical.

Workflow:
1) Decide if a tool is needed for the request.
2) If needed, call the correct MCP tool with the exact schema.
3) After tools complete, give a concise summary in plain language.
4) If no tool is needed, answer directly and briefly.
"""
# ---------------------
# NATIVE MCP CONNECTOR (HTTP STREAMING)
# ---------------------
TOOLS = [
    {
        "type": "mcp",
        "server_label": "deploy_tools",
        "server_url": MCP_SERVER_URL,  # HTTP streaming MCP server
    }
]
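
# Illustrative sketch (not part of this app's pipeline): with the native MCP
# connector, the tool list above is passed straight to the Responses API, and
# the API handles tool discovery and invocation on the remote server itself:
#
#   resp = client.responses.create(
#       model=MODEL_FAST,
#       input="What tools does the deployer expose?",
#       tools=TOOLS,
#   )
#   print(resp.output_text)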
# ---------------------
# FILE UPLOAD HANDLER
# ---------------------
def handle_upload(file_obj, request: gr.Request):
    """
    - Persist the uploaded file to a stable /tmp path.
    - Return a public URL that the MCP tools can use directly.
    """
    if file_obj is None:
        return None

    local_path = file_obj.name
    stable_path = os.path.join("/tmp", os.path.basename(local_path))
    try:
        shutil.copy(local_path, stable_path)
        local_path = stable_path
    except Exception:
        # If the copy fails, still try the original path
        pass

    base = str(request.base_url).rstrip("/")
    return f"{base}/gradio_api/file={local_path}"
# ---------------------
# MAIN CHAT HANDLER (STREAMING)
# ---------------------
def chat_send_stream(user_msg, history, file_url):
    """
    2-phase pipeline:
      PHASE 1: Non-streaming tool resolution using MODEL_FAST.
      PHASE 2: Streaming final answer using MODEL_STREAM.
    Gradio's Chatbot expects: list[{"role": "...", "content": "..."}]
    """
    # Ensure history is a list of {"role": ..., "content": ...} dicts
    if history is None:
        history = []

    # Append the user message to the UI history first
    history.append({"role": "user", "content": user_msg})

    # ---- Build messages for OpenAI (sanitize away metadata etc.) ----
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    for msg in history:
        role = msg.get("role")
        content = msg.get("content", "")
        if role in ("user", "assistant"):
            messages.append({"role": role, "content": content})

    # Inject the file context into the *last* user message for the model
    if file_url:
        last_user = messages[-1]
        if last_user["role"] == "user":