Spaces:
Running
Running
Abid Ali Awan
committed on
Commit
·
ae2cddd
1
Parent(s):
17424a1
refactor: Enhance chat handling in Gradio application by implementing textbox interactivity during streaming phases, updating UI elements for improved user experience, and refining the application title for clarity.
Browse files
app.py
CHANGED
|
@@ -150,7 +150,7 @@ def should_use_tools(user_msg: str) -> bool:
|
|
| 150 |
|
| 151 |
|
| 152 |
# -------------------------
|
| 153 |
-
# Main chat handler (streaming)
|
| 154 |
# -------------------------
|
| 155 |
|
| 156 |
|
|
@@ -165,6 +165,7 @@ def chat_send_stream(user_msg, history, file_url):
|
|
| 165 |
Phase 2: streaming final answer via Responses API (no tools)
|
| 166 |
- Keeps full chat history so follow-ups work.
|
| 167 |
- Shows status/progress messages in the UI when tools are used.
|
|
|
|
| 168 |
"""
|
| 169 |
|
| 170 |
# UI history (what Gradio displays)
|
|
@@ -186,7 +187,11 @@ def chat_send_stream(user_msg, history, file_url):
|
|
| 186 |
if not use_tools:
|
| 187 |
# Add a small status bubble then stream
|
| 188 |
history.append({"role": "assistant", "content": "✏️ Generating answer..."})
|
| 189 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 190 |
|
| 191 |
# Build input text for Responses API
|
| 192 |
input_text = (
|
|
@@ -208,10 +213,18 @@ def chat_send_stream(user_msg, history, file_url):
|
|
| 208 |
if event.type == "response.output_text.delta":
|
| 209 |
final_text += event.delta
|
| 210 |
history[-1]["content"] = final_text
|
| 211 |
-
yield
|
|
|
|
|
|
|
|
|
|
| 212 |
elif event.type == "response.completed":
|
| 213 |
break
|
| 214 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 215 |
return
|
| 216 |
|
| 217 |
# -------------------------
|
|
@@ -229,7 +242,11 @@ def chat_send_stream(user_msg, history, file_url):
|
|
| 229 |
),
|
| 230 |
}
|
| 231 |
)
|
| 232 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 233 |
return
|
| 234 |
|
| 235 |
# User message for the model includes the CSV URL
|
|
@@ -246,7 +263,11 @@ def chat_send_stream(user_msg, history, file_url):
|
|
| 246 |
"content": "⏳ Analyzing your request and selecting MCP tools...",
|
| 247 |
}
|
| 248 |
)
|
| 249 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 250 |
|
| 251 |
# Build a single string input for the tool phase
|
| 252 |
tool_phase_input = (
|
|
@@ -274,7 +295,11 @@ def chat_send_stream(user_msg, history, file_url):
|
|
| 274 |
"role": "assistant",
|
| 275 |
"content": "✅ MCP tools finished. Preparing explanation...",
|
| 276 |
}
|
| 277 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 278 |
|
| 279 |
# -------------------------
|
| 280 |
# Phase 2: Final streaming explanation
|
|
@@ -306,24 +331,32 @@ def chat_send_stream(user_msg, history, file_url):
|
|
| 306 |
if event.type == "response.output_text.delta":
|
| 307 |
final_text += event.delta
|
| 308 |
history[-1]["content"] = final_text
|
| 309 |
-
yield
|
|
|
|
|
|
|
|
|
|
| 310 |
elif event.type == "response.completed":
|
| 311 |
break
|
| 312 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 313 |
|
| 314 |
# -------------------------
|
| 315 |
# Gradio UI
|
| 316 |
# -------------------------
|
| 317 |
|
| 318 |
-
with gr.Blocks(title="
|
| 319 |
gr.Markdown(
|
| 320 |
"""
|
| 321 |
-
#
|
| 322 |
-
|
| 323 |
-
-
|
| 324 |
-
-
|
| 325 |
-
-
|
| 326 |
-
- Final answers stream token by token.
|
| 327 |
"""
|
| 328 |
)
|
| 329 |
|
|
@@ -355,24 +388,19 @@ with gr.Blocks(title="MCP + GPT-5 mini - Streaming MLOps Agent") as demo:
|
|
| 355 |
interactive=True,
|
| 356 |
placeholder="Say hi, or ask me to analyze / train / deploy on your dataset...",
|
| 357 |
)
|
| 358 |
-
send = gr.Button("Send", interactive=True)
|
| 359 |
-
|
| 360 |
-
send.click(
|
| 361 |
-
chat_send_stream,
|
| 362 |
-
inputs=[msg, chatbot, file_url_state],
|
| 363 |
-
outputs=[chatbot],
|
| 364 |
-
).then(lambda: "", outputs=[msg])
|
| 365 |
|
|
|
|
| 366 |
msg.submit(
|
| 367 |
chat_send_stream,
|
| 368 |
inputs=[msg, chatbot, file_url_state],
|
| 369 |
-
outputs=[chatbot],
|
| 370 |
-
)
|
| 371 |
-
|
| 372 |
|
| 373 |
if __name__ == "__main__":
|
| 374 |
demo.queue().launch(
|
|
|
|
| 375 |
allowed_paths=["/tmp"],
|
| 376 |
ssr_mode=False,
|
| 377 |
show_error=True,
|
| 378 |
-
|
|
|
|
|
|
| 150 |
|
| 151 |
|
| 152 |
# -------------------------
|
| 153 |
+
# Main chat handler (streaming + disabling textbox)
|
| 154 |
# -------------------------
|
| 155 |
|
| 156 |
|
|
|
|
| 165 |
Phase 2: streaming final answer via Responses API (no tools)
|
| 166 |
- Keeps full chat history so follow-ups work.
|
| 167 |
- Shows status/progress messages in the UI when tools are used.
|
| 168 |
+
- Disables the textbox during work, re-enables at the end.
|
| 169 |
"""
|
| 170 |
|
| 171 |
# UI history (what Gradio displays)
|
|
|
|
| 187 |
if not use_tools:
|
| 188 |
# Add a small status bubble then stream
|
| 189 |
history.append({"role": "assistant", "content": "✏️ Generating answer..."})
|
| 190 |
+
# Disable textbox while generating
|
| 191 |
+
yield (
|
| 192 |
+
history,
|
| 193 |
+
gr.update(interactive=False),
|
| 194 |
+
)
|
| 195 |
|
| 196 |
# Build input text for Responses API
|
| 197 |
input_text = (
|
|
|
|
| 213 |
if event.type == "response.output_text.delta":
|
| 214 |
final_text += event.delta
|
| 215 |
history[-1]["content"] = final_text
|
| 216 |
+
yield (
|
| 217 |
+
history,
|
| 218 |
+
gr.update(interactive=False),
|
| 219 |
+
)
|
| 220 |
elif event.type == "response.completed":
|
| 221 |
break
|
| 222 |
|
| 223 |
+
# Re-enable textbox at the end
|
| 224 |
+
yield (
|
| 225 |
+
history,
|
| 226 |
+
gr.update(interactive=True, value=""),
|
| 227 |
+
)
|
| 228 |
return
|
| 229 |
|
| 230 |
# -------------------------
|
|
|
|
| 242 |
),
|
| 243 |
}
|
| 244 |
)
|
| 245 |
+
# Keep textbox enabled because nothing heavy is happening
|
| 246 |
+
yield (
|
| 247 |
+
history,
|
| 248 |
+
gr.update(interactive=True),
|
| 249 |
+
)
|
| 250 |
return
|
| 251 |
|
| 252 |
# User message for the model includes the CSV URL
|
|
|
|
| 263 |
"content": "⏳ Analyzing your request and selecting MCP tools...",
|
| 264 |
}
|
| 265 |
)
|
| 266 |
+
# Disable textbox while tools run
|
| 267 |
+
yield (
|
| 268 |
+
history,
|
| 269 |
+
gr.update(interactive=False),
|
| 270 |
+
)
|
| 271 |
|
| 272 |
# Build a single string input for the tool phase
|
| 273 |
tool_phase_input = (
|
|
|
|
| 295 |
"role": "assistant",
|
| 296 |
"content": "✅ MCP tools finished. Preparing explanation...",
|
| 297 |
}
|
| 298 |
+
# Keep textbox disabled (we're about to stream final answer)
|
| 299 |
+
yield (
|
| 300 |
+
history,
|
| 301 |
+
gr.update(interactive=False),
|
| 302 |
+
)
|
| 303 |
|
| 304 |
# -------------------------
|
| 305 |
# Phase 2: Final streaming explanation
|
|
|
|
| 331 |
if event.type == "response.output_text.delta":
|
| 332 |
final_text += event.delta
|
| 333 |
history[-1]["content"] = final_text
|
| 334 |
+
yield (
|
| 335 |
+
history,
|
| 336 |
+
gr.update(interactive=False),
|
| 337 |
+
)
|
| 338 |
elif event.type == "response.completed":
|
| 339 |
break
|
| 340 |
|
| 341 |
+
# Re-enable textbox at the end, and clear it
|
| 342 |
+
yield (
|
| 343 |
+
history,
|
| 344 |
+
gr.update(interactive=True, value=""),
|
| 345 |
+
)
|
| 346 |
+
|
| 347 |
|
| 348 |
# -------------------------
|
| 349 |
# Gradio UI
|
| 350 |
# -------------------------
|
| 351 |
|
| 352 |
+
with gr.Blocks(title="Streaming MLOps Agent") as demo:
|
| 353 |
gr.Markdown(
|
| 354 |
"""
|
| 355 |
+
# 🧠 Smart MLOps Agent
|
| 356 |
+
|
| 357 |
+
- 💬 Chat naturally, even just “hey”
|
| 358 |
+
- 📂 Upload CSVs for analysis, training, and deployment
|
| 359 |
+
- ⚡ See live tool status and streaming answers
|
|
|
|
| 360 |
"""
|
| 361 |
)
|
| 362 |
|
|
|
|
| 388 |
interactive=True,
|
| 389 |
placeholder="Say hi, or ask me to analyze / train / deploy on your dataset...",
|
| 390 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 391 |
|
| 392 |
+
# Only Enter/Return sends messages; no Send button
|
| 393 |
msg.submit(
|
| 394 |
chat_send_stream,
|
| 395 |
inputs=[msg, chatbot, file_url_state],
|
| 396 |
+
outputs=[chatbot, msg],
|
| 397 |
+
)
|
|
|
|
| 398 |
|
| 399 |
if __name__ == "__main__":
|
| 400 |
demo.queue().launch(
|
| 401 |
+
theme=gr.themes.Soft(),
|
| 402 |
allowed_paths=["/tmp"],
|
| 403 |
ssr_mode=False,
|
| 404 |
show_error=True,
|
| 405 |
+
quiet=True,
|
| 406 |
+
)
|