| """ | |
| Gradio + OpenAI Responses API + Remote MCP Server (HTTP) | |
| CSV-based MLOps Agent with streaming final answer & MCP tools | |
| """ | |
| import os | |
| import shutil | |
| import gradio as gr | |
| from openai import OpenAI | |
| # ------------------------- | |
| # Config | |
| # ------------------------- | |
| MCP_SERVER_URL = "https://mcp-1st-birthday-auto-deployer.hf.space/gradio_api/mcp/" | |
| OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") | |
| MODEL = "gpt-5-mini" # you can swap to gpt-5 for final answers if you want | |
| client = OpenAI(api_key=OPENAI_API_KEY) | |
| MCP_TOOLS = [ | |
| { | |
| "type": "mcp", | |
| "server_label": "auto-deployer", | |
| "server_url": MCP_SERVER_URL, | |
| "require_approval": "never", | |
| } | |
| ] | |
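
# With require_approval set to "never", the Responses API is allowed to call the
# remote MCP server's tools directly, without pausing for a per-call approval step.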

# -------------------------
# Short prompts
# -------------------------
TOOL_SYSTEM_PROMPT = """
You are an MLOps assistant with MCP tools for CSV analysis, training,
evaluation, and deployment.
If the user asks about data, datasets, CSVs, models, training,
evaluation, or deployment, call MCP tools instead of guessing.
Use the CSV file URL exactly as given when tools need a file path.
Do not invent tool names or parameters.
Keep internal reasoning hidden and reply briefly with technical details.
"""

FINAL_SYSTEM_PROMPT = """
You are a helpful MLOps explainer and general assistant.
You see the conversation plus a short technical summary of what tools did
(if any). Explain in simple language what was done and what the results
mean. Mention key metrics, model IDs, or endpoints if available.
Suggest next steps briefly. For normal chat (no tools), just respond
helpfully.
Do not mention tools or internal phases explicitly.
Keep the answer clear and concise.
"""

# -------------------------
# Helpers
# -------------------------
def history_to_text(history) -> str:
    """
    Turn Gradio history (list of {role, content}) into a plain-text
    conversation transcript for the model.
    """
    if not history:
        return ""
    lines = []
    for msg in history:
        role = msg.get("role")
        content = msg.get("content", "")
        if role == "user":
            lines.append(f"User: {content}")
        elif role == "assistant":
            lines.append(f"Assistant: {content}")
    return "\n".join(lines)

def extract_output_text(response) -> str:
    """
    Extract plain text from a non-streaming Responses API call.
    Fall back gracefully if the shape is unexpected.
    """
    try:
        if response.output and len(response.output) > 0:
            first = response.output[0]
            if getattr(first, "content", None):
                c0 = first.content[0]
                text = getattr(c0, "text", None)
                if text:
                    return text
        # Fallback
        return getattr(response, "output_text", None) or str(response)
    except Exception:
        return str(response)

def handle_upload(file_path, request: gr.Request):
    """
    1) Take uploaded file path (string)
    2) Copy to /tmp for a stable path
    3) Build a public Gradio file URL that the MCP server can fetch via HTTP
    """
    if not file_path:
        return None
    local_path = file_path
    stable_path = os.path.join("/tmp", os.path.basename(local_path))
    try:
        shutil.copy(local_path, stable_path)
        local_path = stable_path
    except Exception:
        # If copy fails, just use the original path
        pass
    base_url = str(request.base_url).rstrip("/")
    public_url = f"{base_url}/gradio_api/file={local_path}"
    return public_url
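
# Note: URLs of the form {base_url}/gradio_api/file=/tmp/... are only servable
# because launch() below passes allowed_paths=["/tmp"], and the remote MCP server
# can only fetch them if this app is publicly reachable (e.g. a hosted Space).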

def should_use_tools(user_msg: str) -> bool:
    """
    Simple heuristic to decide if this turn should trigger MCP tools.
    Only fire tools if the user is clearly asking for data / model work.
    """
    text = user_msg.lower()
    keywords = [
        "data",
        "dataset",
        "csv",
        "train",
        "training",
        "model",
        "deploy",
        "deployment",
        "predict",
        "prediction",
        "inference",
        "evaluate",
        "evaluation",
        "analyze",
        "analysis",
    ]
    return any(k in text for k in keywords)
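
# e.g. should_use_tools("train a model on my CSV") -> True
#      should_use_tools("hey, how are you?") -> False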

# -------------------------
# Main chat handler (streaming + disabling textbox)
# -------------------------
def chat_send_stream(user_msg, history, file_url):
    """
    Main Gradio streaming handler.
    - If the user is just chatting (e.g., "hey"), respond directly
      with a streaming answer (no tools, no CSV required).
    - If the user clearly asks for data/model operations, run:
        Phase 1: non-streaming tool phase via Responses API + MCP tools
        Phase 2: streaming final answer via Responses API (no tools)
    - Keeps the full chat history so follow-ups work.
    - Shows status/progress messages in the UI when tools are used.
    - Disables the textbox during work, re-enables it at the end.
    """
    # UI history (what Gradio displays)
    if history is None:
        history = []
    # Append the user message to the UI history
    history.append({"role": "user", "content": user_msg})
    # Conversation before this turn (for context)
    convo_before = history_to_text(history[:-1])
    # Decide if this message should trigger tools
    use_tools = should_use_tools(user_msg)

    # -------------------------
    # BRANCH 1: No tools (normal chat, e.g. "hey")
    # -------------------------
    if not use_tools:
        # Add a small status bubble, then stream
        history.append({"role": "assistant", "content": "✏️ Generating answer..."})
        # Disable the textbox while generating
        yield (
            history,
            gr.update(interactive=False),
        )
        # Build the input text for the Responses API
        input_text = (
            (f"Conversation so far:\n{convo_before}\n\n" if convo_before else "")
            + "Latest user message:\n"
            + user_msg
        )
        stream = client.responses.create(
            model=MODEL,
            instructions=FINAL_SYSTEM_PROMPT,
            input=input_text,
            reasoning={"effort": "low"},
            stream=True,
        )
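        # Each "response.output_text.delta" event carries an incremental chunk of the
        # answer; yielding the growing text back to Gradio produces the live typing effect.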
        final_text = ""
        for event in stream:
            if event.type == "response.output_text.delta":
                final_text += event.delta
                history[-1]["content"] = final_text
                yield (
                    history,
                    gr.update(interactive=False),
                )
            elif event.type == "response.completed":
                break
        # Re-enable the textbox at the end
        yield (
            history,
            gr.update(interactive=True, value=""),
        )
        return

    # -------------------------
    # BRANCH 2: Tools needed (data / model operations)
    # -------------------------
    # If tools are needed but there is no file URL yet, ask for a CSV
    if not file_url:
        history.append(
            {
                "role": "assistant",
                "content": (
                    "To analyze, train, or deploy, please upload a CSV file first "
                    "using the file upload control."
                ),
            }
        )
        # Keep the textbox enabled because nothing heavy is happening
        yield (
            history,
            gr.update(interactive=True),
        )
        return

    # The user message for the model includes the CSV URL
    user_with_file = f"[Uploaded CSV file URL: {file_url}]\n\n{user_msg}"

    # -------------------------
    # Phase 1: Tools + technical summary (non-streaming)
    # -------------------------
    # Show a status message in the UI
    history.append(
        {
            "role": "assistant",
            "content": "⏳ Analyzing your request and selecting MCP tools...",
        }
    )
    # Disable the textbox while tools run
    yield (
        history,
        gr.update(interactive=False),
    )
    # Build a single string input for the tool phase
    tool_phase_input = (
        (f"Conversation so far:\n{convo_before}\n\n" if convo_before else "")
        + "Latest user request (with file URL):\n"
        + user_with_file
        + "\n\nYour task: decide which MCP tools to call and run them. "
        "Then return a short technical summary of what you did and what the tools returned."
    )
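    # Non-streaming call: the MCP tool calls run inside this single request, and the
    # model's technical summary comes back as one response object (the "scratchpad").
    # The raw tool output is never shown directly; only the Phase 2 explanation is.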
    tool_phase = client.responses.create(
        model=MODEL,
        instructions=TOOL_SYSTEM_PROMPT,
        input=tool_phase_input,
        tools=MCP_TOOLS,
        reasoning={"effort": "low"},
    )
    scratchpad = extract_output_text(tool_phase).strip()
    if not scratchpad:
        scratchpad = "No MCP tool output was returned."

    # Update status message to show tools finished
    history[-1] = {
        "role": "assistant",
        "content": "✅ MCP tools finished. Preparing explanation...",
    }
    # Keep textbox disabled (we're about to stream final answer)
    yield (
        history,
        gr.update(interactive=False),
    )

    # -------------------------
    # Phase 2: Final streaming explanation
    # -------------------------
    # Replace last assistant message with streaming answer
    history[-1] = {"role": "assistant", "content": ""}
    # Build a single string input for the final explanation phase
    final_input = (
        (f"Conversation so far:\n{convo_before}\n\n" if convo_before else "")
        + "Latest user request (with file URL):\n"
        + user_with_file
        + "\n\nTechnical summary of tool actions and results:\n"
        + scratchpad
        + "\n\nNow explain this clearly to the user."
    )
    stream = client.responses.create(
        model=MODEL,
        instructions=FINAL_SYSTEM_PROMPT,
        input=final_input,
        reasoning={"effort": "low"},
        stream=True,
    )
    final_text = ""
    for event in stream:
        if event.type == "response.output_text.delta":
            final_text += event.delta
            history[-1]["content"] = final_text
            yield (
                history,
                gr.update(interactive=False),
            )
        elif event.type == "response.completed":
            break
    # Re-enable textbox at the end, and clear it
    yield (
        history,
        gr.update(interactive=True, value=""),
    )

# -------------------------
# Gradio UI
# -------------------------
with gr.Blocks(title="Streaming MLOps Agent", theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
        # 🧠 Smart MLOps Agent
        - 💬 Chat naturally, even just “hey”
        - 📂 Upload CSVs for analysis, training, and deployment
        - ⚡ See live tool status and streaming answers
        """
    )

    file_url_state = gr.State(value=None)

    uploader = gr.File(
        label="Optional CSV file upload (required for data/model operations)",
        file_count="single",
        type="filepath",
        file_types=[".csv"],
    )
    uploader.change(
        handle_upload,
        inputs=[uploader],
        outputs=[file_url_state],
    )
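
    # handle_upload also receives a gr.Request; Gradio injects it automatically
    # because of the type annotation, so it does not appear in `inputs` above.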

    chatbot = gr.Chatbot(
        label="Chat",
        type="messages",  # handlers append/yield {"role", "content"} dicts
        avatar_images=(
            None,
            "https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.png",
        ),
    )
    msg = gr.Textbox(
        label="Message",
        interactive=True,
        placeholder="Say hi, or ask me to analyze / train / deploy on your dataset...",
    )

    # Only Enter/Return sends messages; no Send button
    msg.submit(
        chat_send_stream,
        inputs=[msg, chatbot, file_url_state],
        outputs=[chatbot, msg],
    )
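    # Each value yielded by chat_send_stream maps onto these two outputs:
    # the updated chat history and a gr.update() controlling the textbox state.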

if __name__ == "__main__":
    demo.queue().launch(
        allowed_paths=["/tmp"],
        ssr_mode=False,
        show_error=True,
        quiet=True,
    )