Update main.py
main.py CHANGED
@@ -50,10 +50,11 @@ def execute_python(code: str, config: RunnableConfig):
         code: Valid Python code with correct indentation and syntax including necessary imports.
 
     Available Libraries:
-    # Use
+    # Use plotly as the default charting library
 
-
+    matplotlib
     pandas
+    plotly
     groq
     yfinance
     numpy
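The first hunk only touches the execute_python docstring: it adds matplotlib and plotly to the list of available libraries and replaces the stub "# Use" comment with an instruction to default to Plotly. A minimal sketch of the kind of snippet the tool could now be asked to run; the ticker, period, and column are illustrative assumptions, not part of the change:

import plotly.express as px
import yfinance as yf

# Download a month of daily prices for a sample ticker (illustrative values)
prices = yf.download("AAPL", period="1mo")

# Chart the closing price with Plotly, the default charting library per the new comment
fig = px.line(prices["Close"], title="AAPL closing price, last month")
fig.show()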
@@ -96,33 +97,28 @@ def execute_python(code: str, config: RunnableConfig):
 memory = MemorySaver()
 model = ChatOpenAI(model="gpt-4o-mini", streaming=True)
 prompt = ChatPromptTemplate.from_messages([
-    ("system", f"You are a Data Visualization assistant.You have access to a jupyter client with access to internet for python code execution
-     Your taks is to assist users with your data analysis and visualization expertise. Use Plotly for creating visualizations. Today's date is \
-     {datetime.now().strftime('%Y-%m-%d')}. The current folder contains the following files: {{collection_files}}"),
+    ("system", f"You are a Data Visualization assistant.You have access to a jupyter client with access to internet for python code execution. Your taks is to assist users with your data analysis and visualization expertise. Today's date is {datetime.now().strftime('%Y-%m-%d')}. The current folder contains the following files: {{collection_files}}"),
     ("placeholder", "{messages}"),
 ])
 
 def state_modifier(state) -> list[BaseMessage]:
     collection_files = "None"
-    [16 removed lines not captured in this view]
-    except Exception as e:
-        print(f"Error in state modifier: {str(e)}")
-        return state["messages"]
+    # Format the prompt with the current state
+    formatted_prompt = prompt.invoke({
+        "collection_files": collection_files,
+        "messages": state["messages"]
+    })
+
+    # Trim the messages
+    return trim_messages(
+        formatted_prompt,
+        token_counter=len,
+        max_tokens=16000,
+        strategy="last",
+        start_on="human",
+        include_system=True,
+        allow_partial=False,
+    )
 
 # Create the agent with the Python execution tool
 agent = create_react_agent(
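The second hunk collapses the three-line system prompt into a single line and fills in state_modifier: it formats the prompt with the collection_files placeholder and the message history, then trims the result with trim_messages before it reaches the model. Note that with token_counter=len the limit counts messages rather than model tokens, and strategy="last" with include_system=True keeps the system instructions plus the most recent turns. A self-contained sketch of that trimming behaviour, with illustrative values (the diff itself uses max_tokens=16000):

from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, trim_messages

history = [
    SystemMessage(content="You are a Data Visualization assistant."),
    HumanMessage(content="Plot AAPL for the last month."),
    AIMessage(content="Done - see the chart."),
    HumanMessage(content="Now add a 7-day moving average."),
]

trimmed = trim_messages(
    history,
    token_counter=len,    # count messages, mirroring the diff
    max_tokens=3,         # keep at most 3 messages (illustrative; the diff allows 16000)
    strategy="last",      # keep the most recent messages
    start_on="human",     # the first non-system message kept must be a HumanMessage
    include_system=True,  # always keep the system message
    allow_partial=False,
)

for message in trimmed:
    print(type(message).__name__, "-", message.content)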
@@ -147,56 +143,52 @@ async def chat(input_data: ChatInput):
     }
 
     input_message = HumanMessage(content=input_data.message)
-    try:
-        async def generate():
-            async for event in agent.astream_events(
-                {"messages": [input_message]},
-                config,
-                version="v2"
-            ):
-                kind = event["event"]
-
-                if kind == "on_chat_model_stream":
-                    content = event["data"]["chunk"].content
-                    if content:
-                        yield f"{json.dumps({'type': 'token', 'content': content})}\n"
-
-                if kind == "on_tool_start":
-                    print(f"Debug - Tool being called: {event['name']}") # Add this debug line
-                    tool_input = event['data'].get('input', '')
-                    yield f"{json.dumps({'type': 'tool_start', 'tool': event['name'], 'input': tool_input})}\n"
-
-                elif kind == "on_tool_end":
-                    tool_output = event['data'].get('output', '').content
-                    #print(type(tool_output))
-                    #print(dir(tool_output))
-                    #print the keys
-                    pattern = r'data: (.*?)\ndata:'
-                    match = re.search(pattern, tool_output)
-                    print(tool_output)
 
-    [22 removed lines not captured in this view]
+    async def generate():
+        async for event in agent.astream_events(
+            {"messages": [input_message]},
+            config,
+            version="v2"
+        ):
+            kind = event["event"]
+
+            if kind == "on_chat_model_stream":
+                content = event["data"]["chunk"].content
+                if content:
+                    yield f"{json.dumps({'type': 'token', 'content': content})}\n"
+
+            elif kind == "on_tool_start":
+                tool_input = event['data'].get('input', '')
+                yield f"{json.dumps({'type': 'tool_start', 'tool': event['name'], 'input': tool_input})}\n"
+
+            elif kind == "on_tool_end":
+                tool_output = event['data'].get('output', '').content
+                #print(type(tool_output))
+                #print(dir(tool_output))
+                #print the keys
+                pattern = r'data: (.*?)\ndata:'
+                match = re.search(pattern, tool_output)
+                print(tool_output)
+
+                if match:
+                    tool_output_json = match.group(1).strip()
+                    try:
+                        tool_output = json.loads(tool_output_json)
+                        if "artifacts" in tool_output:
+                            for artifact in tool_output["artifacts"]:
+                                artifact_content = requests.get(f"{API_URL}/artifact/{artifact['artifact_id']}").content
+                                print(artifact_content)
+                                tool_output["artifacts"][artifact["artifact_id"]] = artifact_content
+                    except Exception as e:
+                        print(e)
+                        print("Error parsing tool output as json: ", tool_output)
+                else:
+                    print("No match found in tool output")
+                yield f"{json.dumps({'type': 'tool_end', 'tool': event['name'], 'output': tool_output})}\n"
+    return EventSourceResponse(
+        generate(),
+        media_type="text/event-stream"
+    )
 
 @app.get("/health")
 async def health_check():
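The third hunk drops the outer try: wrapper, turns the tool-start handler into an elif branch without the debug print, parses the tool's "data:"-framed output with a regex and resolves artifact ids against API_URL, and returns the whole stream as an EventSourceResponse of newline-delimited JSON events. A sketch of how a client might consume that stream; the base URL, the /chat route, and the request body shape are assumptions (the diff only shows input_data.message):

import asyncio
import json

import httpx


async def main() -> None:
    async with httpx.AsyncClient(timeout=None) as client:
        # Assumed host/port and route for the FastAPI app; adjust to the deployment.
        async with client.stream(
            "POST",
            "http://localhost:8000/chat",
            json={"message": "Plot AAPL for the last month."},
        ) as response:
            async for line in response.aiter_lines():
                # sse-starlette typically frames each yielded string as a "data: ..." line
                if not line.startswith("data: "):
                    continue
                event = json.loads(line[len("data: "):])
                if event["type"] == "token":
                    print(event["content"], end="", flush=True)
                else:
                    print(f"\n[{event['type']}] {event.get('tool', '')}")


asyncio.run(main())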