Abid Ali Awan committed
Commit ad62f45 · Parent(s): e01ab7c
refactor: Revise system prompt and enhance chat handling in the Gradio application by clarifying rules and workflow, improving file upload handling, and updating the chat response pipeline for better structure and clarity.
app.py
CHANGED
@@ -4,7 +4,6 @@ Gradio + OpenAI MCP Connector — Clean, Fast, Streaming, With File Upload
 
 import os
 import shutil
-
 import gradio as gr
 from openai import OpenAI
 
@@ -13,27 +12,41 @@ from openai import OpenAI
 # ---------------------
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
 MCP_SERVER_URL = "https://mcp-1st-birthday-auto-deployer.hf.space/gradio_api/mcp/"
-
-
+
+MODEL_FAST = "gpt-5-mini"    # for tool resolution / MCP calls
+MODEL_STREAM = "gpt-5.1"     # for final streaming reply
 
 client = OpenAI(api_key=OPENAI_API_KEY)
 
+# ---------------------
+# SYSTEM PROMPT
+# ---------------------
 SYSTEM_PROMPT = """
-You are a fast MLOps assistant with
-
-
-
+You are a fast MLOps automation assistant equipped with remote MCP tools
+for dataset analysis, model training, evaluation, and deployment.
+
+Rules:
+- Use MCP tools when they directly address the user's request.
+- Treat the uploaded CSV file URL as the source of truth. Never modify it.
+- Never hallucinate tool names, arguments, or fields.
+- Keep your internal reasoning hidden.
+- Keep responses short, direct, and practical.
+
+Workflow:
+1) Decide if a tool is needed for the request.
+2) If needed, call the correct MCP tool with the exact schema.
+3) After tools complete, give a concise summary in plain language.
+4) If no tool is needed, answer directly and briefly.
 """
 
 # ---------------------
-# NATIVE MCP CONNECTOR
+# NATIVE MCP CONNECTOR (HTTP STREAMING)
 # ---------------------
 TOOLS = [
     {
         "type": "mcp",
         "server_label": "deploy_tools",
-        "server_url": MCP_SERVER_URL,
-        # transport auto-detected; HF space supports HTTP
+        "server_url": MCP_SERVER_URL,   # HTTP streaming MCP server
     }
 ]
 
@@ -42,139 +55,54 @@ TOOLS = [
 # FILE UPLOAD HANDLER
 # ---------------------
 def handle_upload(file_obj, request: gr.Request):
+    """
+    - Persist uploaded file to a stable /tmp path
+    - Return a public URL that the MCP tools can use directly
+    """
     if file_obj is None:
         return None
 
-    # Ensure file is in a stable path
     local_path = file_obj.name
    stable_path = os.path.join("/tmp", os.path.basename(local_path))
+
     try:
         shutil.copy(local_path, stable_path)
         local_path = stable_path
     except Exception:
+        # If copy fails, still try original path
         pass
 
-    # Build public Gradio URL
     base = str(request.base_url).rstrip("/")
     return f"{base}/gradio_api/file={local_path}"
 
 
 # ---------------------
-# MAIN CHAT HANDLER
+# MAIN CHAT HANDLER (STREAMING)
 # ---------------------
 def chat_send_stream(user_msg, history, file_url):
-
+    """
+    2-phase pipeline:
+      PHASE 1: Non-streaming tool resolution using MODEL_FAST
+      PHASE 2: Streaming final answer using MODEL_STREAM
+    Gradio Chatbot expects: list[{"role": "...", "content": "..."}]
+    """
+
+    # Ensure history is list[dict(role, content)]
     if history is None:
         history = []
 
-    # Append the user
+    # Append the user message to the UI history first
     history.append({"role": "user", "content": user_msg})
 
-    # Build OpenAI
+    # ---- Build messages for OpenAI (sanitize away metadata etc.) ----
     messages = [{"role": "system", "content": SYSTEM_PROMPT}]
-
+    for msg in history:
+        role = msg.get("role")
+        content = msg.get("content", "")
+        if role in ("user", "assistant"):
+            messages.append({"role": role, "content": content})
 
-    # Inject file context
-    final_user_msg = user_msg
+    # Inject file context into *last* user message for the model
     if file_url:
-
-
-    messages[-1] = {"role": "user", "content": final_user_msg}
-
-    # ----------------------------------
-    # PHASE 1 — TOOL RESOLUTION
-    # ----------------------------------
-    tool_phase = client.responses.create(
-        model=MODEL_FAST,
-        reasoning={"effort": "low"},
-        tools=TOOLS,
-        instructions=SYSTEM_PROMPT,
-        input=messages,
-    )
-
-    tool_feedback = []
-
-    if tool_phase.output:
-        for item in tool_phase.output:
-            if item.type == "tool_call":
-                tool_feedback.append(f"🛠️ Used tool `{item.name}`.")
-            elif item.type == "tool_result":
-                tool_feedback.append(str(item.content))
-
-        if not tool_feedback:
-            tool_feedback.append("No MCP tools needed.")
-
-    else:
-        tool_feedback.append("No MCP tools needed.")
-
-    # Add assistant message with tool feedback
-    history.append({"role": "assistant", "content": "\n".join(tool_feedback)})
-
-    yield history
-
-    # ----------------------------------
-    # PHASE 2 — STREAMING FINAL ANSWER
-    # ----------------------------------
-    final_msg = history[-1]["content"] + "\n\n"
-    history[-1]["content"] = final_msg
-
-    stream = client.responses.create(
-        model=MODEL_STREAM,
-        reasoning={"effort": "low"},
-        instructions=SYSTEM_PROMPT,
-        input=messages + [history[-1]],
-        stream=True,
-    )
-
-    for ev in stream:
-        if ev.type == "response.output_text.delta":
-            final_msg += ev.delta
-            history[-1]["content"] = final_msg
-            yield history
-
-        elif ev.type == "response.completed":
-            break
-
-    stream.close()
-
-
-# ---------------------
-# GRADIO UI
-# ---------------------
-with gr.Blocks(title="MCP + GPT-5 — Fast Streaming MLOps Agent") as demo:
-    gr.Markdown("""
-    # 🚀 AI-Driven MLOps Agent (MCP-Powered)
-    - Upload a CSV file
-    - Tools resolve instantly
-    - Final answer streams smoothly
-    """)
-
-    file_state = gr.State()
-
-    uploader = gr.File(label="Upload CSV file", type="filepath", file_count="single")
-
-    uploader.change(handle_upload, inputs=[uploader], outputs=[file_state])
-
-    chatbot = gr.Chatbot(label="Chat")
-    msg = gr.Textbox(label="Message")
-    send = gr.Button("Send")
-
-    send.click(
-        chat_send_stream,
-        inputs=[msg, chatbot, file_state],
-        outputs=[chatbot],
-    ).then(lambda: "", outputs=[msg])
-
-    msg.submit(
-        chat_send_stream,
-        inputs=[msg, chatbot, file_state],
-        outputs=[chatbot],
-    ).then(lambda: "", outputs=[msg])
-
-
-if __name__ == "__main__":
-    demo.queue().launch(
-        allowed_paths=["/tmp"],
-        show_error=True,
-        quiet=True,
-    )
+        last_user = messages[-1]
+        if last_user["role"] == "user":
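
Note: the diff shown above is cut off inside chat_send_stream, so the committed version of the two-phase pipeline is not visible past the file-context check. As a rough sketch only (not the committed code), the pattern the new docstring describes — PHASE 1 tool resolution with MODEL_FAST through the MCP connector, PHASE 2 a streamed reply with MODEL_STREAM — can be wired with the Responses API much like the removed implementation; `messages` and `history` are the variables built earlier in the function:

    # PHASE 1 — resolve MCP tools with the fast model (non-streaming)
    tool_phase = client.responses.create(
        model=MODEL_FAST,
        reasoning={"effort": "low"},
        tools=TOOLS,                      # native MCP connector defined at the top of app.py
        instructions=SYSTEM_PROMPT,
        input=messages,
    )

    # PHASE 2 — stream the final answer with the larger model
    final_msg = ""
    history.append({"role": "assistant", "content": final_msg})
    stream = client.responses.create(
        model=MODEL_STREAM,
        reasoning={"effort": "low"},
        instructions=SYSTEM_PROMPT,
        input=messages,
        stream=True,
    )
    for ev in stream:
        if ev.type == "response.output_text.delta":
            final_msg += ev.delta
            history[-1]["content"] = final_msg
            yield history                 # each yield re-renders the Chatbot
        elif ev.type == "response.completed":
            break

A generator like this only streams in the UI if the click/submit handlers pass it straight to the Chatbot output, as the removed GRADIO UI block did.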
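
One detail worth keeping in mind with handle_upload: the URL it returns (f"{base}/gradio_api/file=/tmp/<name>") is only served if /tmp is whitelisted at launch, which the removed launcher did with allowed_paths=["/tmp"]. A minimal sketch of that wiring, reusing the component names from the removed GRADIO UI block (illustrative, not the committed UI):

with gr.Blocks() as demo:
    file_state = gr.State()
    uploader = gr.File(label="Upload CSV file", type="filepath", file_count="single")
    # handle_upload copies the upload to /tmp and returns a public
    # {base}/gradio_api/file=/tmp/<name> URL for the MCP tools to fetch
    uploader.change(handle_upload, inputs=[uploader], outputs=[file_state])

if __name__ == "__main__":
    # Files under /tmp are only served when /tmp is listed in allowed_paths
    demo.queue().launch(allowed_paths=["/tmp"], show_error=True)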