Abid Ali Awan committed
Commit e01ab7c · 1 Parent(s): 693bdb2

refactor: Simplify chat message handling in the Gradio app by consolidating user-message updates and tool-feedback processing; history is now kept in role/content form throughout the two-phase chat response pipeline.

Files changed (1): app.py (+23 -29)

app.py CHANGED
```diff
@@ -4,6 +4,7 @@ Gradio + OpenAI MCP Connector — Clean, Fast, Streaming, With File Upload
 
 import os
 import shutil
+
 import gradio as gr
 from openai import OpenAI
 
```
```diff
@@ -62,33 +63,27 @@ def handle_upload(file_obj, request: gr.Request):
 # MAIN CHAT HANDLER
 # ---------------------
 def chat_send_stream(user_msg, history, file_url):
-    """
-    2-phase pipeline:
-    PHASE 1 ➜ Non-streaming tool resolution using gpt-5-mini
-    PHASE 2 ➜ Streaming final output using gpt-5
-    """
-
+    # history always starts as list of dicts
     if history is None:
         history = []
 
-    # Build message history for OpenAI
+    # Append the user's message
+    history.append({"role": "user", "content": user_msg})
+
+    # Build OpenAI message history
     messages = [{"role": "system", "content": SYSTEM_PROMPT}]
-    for u, a in history:
-        messages.append({"role": "user", "content": u})
-        if a:
-            messages.append({"role": "assistant", "content": a})
+    messages.extend(history)
 
     # Inject file context
+    final_user_msg = user_msg
     if file_url:
-        user_msg_full = f"[Uploaded CSV file: {file_url}]\n\n{user_msg}"
-    else:
-        user_msg_full = user_msg
+        final_user_msg = f"[Uploaded CSV file: {file_url}]\n\n{user_msg}"
 
-    messages.append({"role": "user", "content": user_msg_full})
+    messages[-1] = {"role": "user", "content": final_user_msg}
 
-    # -----------------------------
+    # ----------------------------------
     # PHASE 1 — TOOL RESOLUTION
-    # -----------------------------
+    # ----------------------------------
     tool_phase = client.responses.create(
         model=MODEL_FAST,
         reasoning={"effort": "low"},
```
```diff
@@ -99,13 +94,12 @@ def chat_send_stream(user_msg, history, file_url):
 
     tool_feedback = []
 
-    # Detect tool calls (if any)
     if tool_phase.output:
         for item in tool_phase.output:
             if item.type == "tool_call":
                 tool_feedback.append(f"🛠️ Used tool `{item.name}`.")
             elif item.type == "tool_result":
-                tool_feedback.append(f"{item.content}")
+                tool_feedback.append(str(item.content))
 
         if not tool_feedback:
             tool_feedback.append("No MCP tools needed.")
```
```diff
@@ -113,29 +107,29 @@ def chat_send_stream(user_msg, history, file_url):
     else:
         tool_feedback.append("No MCP tools needed.")
 
-    # Append tool results to messages before final generation
-    messages.append({"role": "assistant", "content": "\n".join(tool_feedback)})
+    # Add assistant message with tool feedback
+    history.append({"role": "assistant", "content": "\n".join(tool_feedback)})
 
-    # Yield intermediate tool feedback to the UI
-    history.append((user_msg, "\n".join(tool_feedback)))
     yield history
 
-    # -----------------------------
+    # ----------------------------------
     # PHASE 2 — STREAMING FINAL ANSWER
-    # -----------------------------
+    # ----------------------------------
+    final_msg = history[-1]["content"] + "\n\n"
+    history[-1]["content"] = final_msg
+
     stream = client.responses.create(
         model=MODEL_STREAM,
         reasoning={"effort": "low"},
         instructions=SYSTEM_PROMPT,
-        input=messages,
+        input=messages + [history[-1]],
         stream=True,
     )
 
-    final_text = ""
     for ev in stream:
         if ev.type == "response.output_text.delta":
-            final_text += ev.delta
-            history[-1] = (user_msg, "\n".join(tool_feedback) + "\n\n" + final_text)
+            final_msg += ev.delta
+            history[-1]["content"] = final_msg
             yield history
 
         elif ev.type == "response.completed":
```
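After this refactor, `chat_send_stream` is a generator whose yields are valid messages-format history: each `response.output_text.delta` event appends to the last assistant message and re-yields the whole list. A minimal wiring sketch for a messages-format Chatbot follows; the Blocks layout and component names are assumptions, since the commit only touches the handler:

```python
import gradio as gr

# Hypothetical UI wiring for the refactored handler; the actual
# Blocks layout is not part of this commit.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")  # renders role/content dicts
    box = gr.Textbox(placeholder="Ask about your CSV...")
    file_url = gr.State(None)

    # chat_send_stream is a generator; each `yield history`
    # re-renders the whole conversation in the Chatbot.
    box.submit(chat_send_stream, [box, chatbot, file_url], chatbot)

demo.launch()
```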