Abid Ali Awan committed
Commit · d8c5e10
1 Parent(s): 8b9fcae
refactor: Clean up imports, improve code formatting, and enhance readability in the Gradio application for better maintainability.
app.py
CHANGED
@@ -2,24 +2,28 @@
 Gradio MCP Client for Remote MCP Server - With File Upload
 """
 
-import gradio as gr
+import asyncio
 import json
+import os
 import shutil
-import urllib.parse
-import asyncio
-import warnings
 import signal
 import sys
-import os
+import urllib.parse
+import warnings
 from contextlib import asynccontextmanager
 
-from openai import OpenAI
+import gradio as gr
 from fastmcp import Client
 from fastmcp.client.transports import StreamableHttpTransport
+from openai import OpenAI
 
 # Suppress deprecation warnings
-warnings.filterwarnings("ignore", category=DeprecationWarning, module="websockets.legacy")
-warnings.filterwarnings("ignore", category=DeprecationWarning, module="uvicorn.protocols.websockets")
+warnings.filterwarnings(
+    "ignore", category=DeprecationWarning, module="websockets.legacy"
+)
+warnings.filterwarnings(
+    "ignore", category=DeprecationWarning, module="uvicorn.protocols.websockets"
+)
 
 # Import orchestrator functions (if available)
 try:
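Note on the warnings block: filterwarnings scopes the suppression by module, so only DeprecationWarnings raised from websockets.legacy and uvicorn's websocket protocols are hidden. A minimal standalone sketch of the same mechanism, using nothing beyond the stdlib:

import warnings

# "module" is a regex matched against the name of the module that
# triggers the warning, so the ignore stays scoped to that library.
warnings.filterwarnings(
    "ignore", category=DeprecationWarning, module="websockets.legacy"
)

# A DeprecationWarning raised elsewhere (here, from __main__) is still shown.
warnings.warn("raised outside websockets.legacy", DeprecationWarning)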
@@ -252,7 +256,9 @@ async def chat_send_stream(user_msg, history, file_url):
         history_tuples.append(msg)
 
     # Stream the response using async generator
-    async for chunk in run_orchestrated_chat_stream(user_msg, history_tuples, file_url):
+    async for chunk in run_orchestrated_chat_stream(
+        user_msg, history_tuples, file_url
+    ):
         chunk_type = chunk.get("type", "")
         chunk_content = chunk.get("content", "")
 
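For context on the rewrapped call: run_orchestrated_chat_stream is the app's async generator, and the two chunk.get(...) lines imply each yielded chunk is a dict with "type" and "content" keys. A self-contained sketch of that consumption pattern; fake_stream is a made-up stand-in, not the app's implementation:

import asyncio


async def fake_stream(prompt):
    # Stand-in for run_orchestrated_chat_stream: yield typed chunks
    # that a UI can render incrementally.
    yield {"type": "tool", "content": "calling a tool..."}
    for word in f"Echo: {prompt}".split():
        await asyncio.sleep(0)  # let the event loop interleave other work
        yield {"type": "text", "content": word + " "}


async def main():
    answer = ""
    async for chunk in fake_stream("hello"):
        chunk_type = chunk.get("type", "")
        chunk_content = chunk.get("content", "")
        if chunk_type == "text":
            answer += chunk_content
    print(answer.strip())


asyncio.run(main())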
@@ -341,7 +347,10 @@ with gr.Blocks(title="MCP + GPT-5 mini (file-first)") as demo:
     # Use message format for better streaming support
     chatbot = gr.Chatbot(
         label="Chat",
-        avatar_images=(None, "https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.png"),
+        avatar_images=(
+            None,
+            "https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.png",
+        ),
         layout="bubble",
     )
 
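avatar_images takes a (user, assistant) pair; None keeps Gradio's default user icon while the second entry points the bot avatar at the HF logo. A minimal sketch of just this widget, assuming a recent Gradio release:

import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(
        label="Chat",
        # (user_avatar, bot_avatar): None falls back to the default icon.
        avatar_images=(
            None,
            "https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.png",
        ),
        layout="bubble",  # chat bubbles instead of full-width panels
    )

if __name__ == "__main__":
    demo.launch()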
@@ -350,8 +359,7 @@ with gr.Blocks(title="MCP + GPT-5 mini (file-first)") as demo:
 
     # Add a toggle for streaming
     use_streaming = gr.Checkbox(
-        label="🚀 Enable streaming (shows tool invocations in real-time)",
-        value=True
+        label="🚀 Enable streaming (shows tool invocations in real-time)", value=True
     )
 
     gr.Examples(
@@ -376,20 +384,14 @@ with gr.Blocks(title="MCP + GPT-5 mini (file-first)") as demo:
         chat_send_stream,
         inputs=[msg, chatbot, file_url_box],
         outputs=[chatbot, msg],
-    ).then(
-        lambda: None,
-        outputs=[msg]
-    )
+    ).then(lambda: None, outputs=[msg])
 
     # Press Enter to send (streaming)
     msg.submit(
         chat_send_stream,
         inputs=[msg, chatbot, file_url_box],
         outputs=[chatbot, msg],
-    ).then(
-        lambda: None,
-        outputs=[msg]
-    )
+    ).then(lambda: None, outputs=[msg])
 
 
 if __name__ == "__main__":
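The collapsed .then(lambda: None, outputs=[msg]) is the usual Gradio idiom for clearing the input box once the send handler finishes: .click() and .submit() return a dependency object that .then() chains onto, and returning None into a Textbox output empties it. A minimal sketch of the same wiring with a hypothetical respond handler (the real chat_send_stream is more involved):

import gradio as gr


def respond(message, history):
    history = history + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": f"Echo: {message}"},
    ]
    return history, message  # leave the text in place until .then() clears it


with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(label="Message")
    send = gr.Button("Send")

    # Chain a cleanup step after the main handler; lambda: None
    # writes None into the Textbox, which renders as empty.
    send.click(respond, inputs=[msg, chatbot], outputs=[chatbot, msg]).then(
        lambda: None, outputs=[msg]
    )
    msg.submit(respond, inputs=[msg, chatbot], outputs=[chatbot, msg]).then(
        lambda: None, outputs=[msg]
    )

if __name__ == "__main__":
    demo.launch()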
@@ -414,8 +416,6 @@ if __name__ == "__main__":
             ssr_mode=False,
             prevent_thread_lock=True,  # Helps with asyncio cleanup
             show_error=True,  # Show errors instead of silent failures
-            server_name="0.0.0.0",  # Explicit server name
-            server_port=7860,  # Explicit port
         )
     except KeyboardInterrupt:
         print("\nShutdown requested by user")
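Dropping server_name and server_port should be safe here: launch() falls back to the GRADIO_SERVER_NAME and GRADIO_SERVER_PORT environment variables, which the Spaces runtime sets to 0.0.0.0 and 7860 (my understanding of the platform; worth verifying against the current Spaces docs). A tiny sketch of the fallback logic, mirroring the documented defaults rather than Gradio's actual source:

import os

host = os.environ.get("GRADIO_SERVER_NAME", "127.0.0.1")
port = int(os.environ.get("GRADIO_SERVER_PORT", "7860"))
print(f"Gradio would bind to {host}:{port}")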
@@ -440,4 +440,4 @@ if __name__ == "__main__":
         try:
             current_loop.close()
         except Exception:
-            pass # Ignore cleanup errors
+            pass  # Ignore cleanup errors
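The last hunk only normalizes comment spacing, but the surrounding pattern deserves a note: close() on an event loop that is still running raises RuntimeError, so shutdown code swallows cleanup errors deliberately. A standalone sketch of that teardown, assuming nothing about the app's own loop handling:

import asyncio

loop = asyncio.new_event_loop()
try:
    loop.run_until_complete(asyncio.sleep(0))
finally:
    try:
        loop.close()
    except Exception:
        pass  # ignore cleanup errors during interpreter shutdown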