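# Gradio chatbot backed by the Mistral API (OpenAI-compatible endpoint) that
# discovers and advertises tools from a remote MCP server over streamable HTTP.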
import gradio as gr
from openai import OpenAI
import os
import json
import asyncio
from contextlib import AsyncExitStack
from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client

cle_api = os.environ.get("CLE_API_MISTRAL")

# Initialize the Mistral client (OpenAI-compatible API)
clientLLM = OpenAI(api_key=cle_api, base_url="https://api.mistral.ai/v1")

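# Dedicated event loop so the synchronous Gradio callbacks can drive the
# async MCP client outside of Gradio's own loop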
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)

class MCPClientWrapper:
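    """Wraps the MCP session, its HTTP transport, and the discovered tools."""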
    def __init__(self):
        self.session = None
        self.exit_stack = None
        self.tools = []
    
    def connect(self, server_url: str) -> str:
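        # Synchronous facade: block on the shared loop so plain code can call it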
        return loop.run_until_complete(self._connect(server_url))
    
    async def _connect(self, server_url: str) -> str:
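        # Tear down any previous connection before opening a new one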
        if self.exit_stack:
            await self.exit_stack.aclose()
        
        self.exit_stack = AsyncExitStack()
        
        # Open the streamable HTTP transport to the MCP server
        streams = await self.exit_stack.enter_async_context(streamablehttp_client(url=server_url))
        # streamablehttp_client yields (read_stream, write_stream, get_session_id)
        self.http_read, self.http_write, _ = streams

        self.session = await self.exit_stack.enter_async_context(ClientSession(self.http_read, self.http_write))
        await self.session.initialize()
        
        tools_response = await self.session.list_tools()
        # Convert MCP tool definitions into the OpenAI function-calling format
        # expected by the Mistral chat completions endpoint
        self.tools = [
            {
                "type": "function",
                "function": {
                    "name": t.name,
                    "description": t.description,
                    "parameters": t.inputSchema,
                },
            }
            for t in tools_response.tools
        ]
        tool_names = [t["function"]["name"] for t in self.tools]
        return f"Connected to MCP {server_url}. Available tools: {', '.join(tool_names)}"

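# Connect once at startup; the URL must point at the server's MCP endpoint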
clientMCP = MCPClientWrapper()
print(clientMCP.connect("https://huggingface.co/spaces/HackathonCRA/mcp"))
print(clientMCP.tools)

# Chatbot
def chatbot(message, history):
    # Rebuild the conversation history for the API call
    messages = []
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})

    messages.append({"role": "user", "content": message})

    # Call the Mistral API, advertising the MCP tools fetched at connect time
    response = clientLLM.chat.completions.create(
        model="mistral-small-latest",
        messages=messages,
        tools=clientMCP.tools
    )
    reply = response.choices[0].message

    # If the model requested tool calls, execute them through the MCP session
    # (reusing the shared loop, as connect() does) and ask for a final answer
    if reply.tool_calls:
        messages.append(reply)
        for tc in reply.tool_calls:
            result = loop.run_until_complete(
                clientMCP.session.call_tool(tc.function.name, json.loads(tc.function.arguments))
            )
            result_text = "\n".join(c.text for c in result.content if hasattr(c, "text"))
            messages.append({
                "role": "tool",
                "tool_call_id": tc.id,
                "content": result_text
            })
        response = clientLLM.chat.completions.create(
            model="mistral-small-latest",
            messages=messages
        )
        reply = response.choices[0].message

    bot_reply = (reply.content or "").strip()
    # Store the raw strings (no "Vous:"/"Bot:" prefixes) so the history fed
    # back to the API on the next turn stays clean
    history.append((message, bot_reply))
    return history, ""
    
with gr.Blocks() as demo:
    chatbot_ui = gr.Chatbot(label="ChatBot")
    msg = gr.Textbox(placeholder="Type a message...")

    # Update the chat display and clear the textbox after each submission
    msg.submit(chatbot, [msg, chatbot_ui], [chatbot_ui, msg])

demo.launch(debug=True)