Spaces:
Runtime error
Runtime error
| import uuid | |
| from fastapi import FastAPI | |
| from fastapi.responses import StreamingResponse | |
| from fastapi.middleware.cors import CORSMiddleware | |
| from langchain_core.messages import BaseMessage, HumanMessage, trim_messages | |
| from langchain_core.tools import tool | |
| from langchain_openai import ChatOpenAI | |
| from langgraph.checkpoint.memory import MemorySaver | |
| from langgraph.prebuilt import create_react_agent | |
| from pydantic import BaseModel | |
| from typing import Optional | |
| import json | |
| from sse_starlette.sse import EventSourceResponse | |
| import io | |
| import sys | |
| from contextlib import redirect_stdout, redirect_stderr | |
| from langchain_core.runnables import RunnableConfig | |
| import requests | |
| import uvicorn | |
| import re | |
| from fastapi.staticfiles import StaticFiles | |
| from langchain_core.runnables import RunnableConfig | |
| from langchain_core.prompts import ChatPromptTemplate | |
| from datetime import datetime | |
| from presentation_api import router as presentation_router | |
| from yf_docs import yf_docs | |
# FastAPI application wiring: core app, presentation routes, CORS, static chat UI.
app = FastAPI()
app.include_router(presentation_router)
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# rejected by browsers for credentialed requests — confirm whether credentials
# are actually needed, or pin the origins list.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Serve the prebuilt chat frontend at /chatui (html=True serves index.html at the mount root).
app.mount("/chatui", StaticFiles(directory="static/chatui", html=True), name="index")
class CodeExecutionResult:
    """Container for the result of one remote code execution.

    Attributes:
        output: Captured output text of the execution.
        error: Error text if the execution failed, otherwise ``None``.
    """

    def __init__(self, output: str, error: Optional[str] = None):
        # Fixed annotation: the default is None, so the parameter is Optional[str],
        # not str as originally annotated.
        self.output = output
        self.error = error

    def __repr__(self) -> str:
        return f"CodeExecutionResult(output={self.output!r}, error={self.error!r})"
# Base URL of the remote sandboxed code-execution service used by execute_python.
API_URL = "https://vps-91587096.vps.ovh.us/api1"
def execute_python(code: str, config: RunnableConfig):
    """Execute Python code in a Jupyter notebook and return the output. The returned artifacts (if present) are automatically rendered in the UI and visible to the user. Available Libraries: plotly (default charting library),pandas,yfinance,numpy,geopandas,folium
    Args:
        code: Valid Python code with correct indentation and syntax including necessary imports.
    """
    # Returns a (content, artifact) tuple: a human-readable status string for the
    # agent, plus an optional dict carrying artifact payloads for the UI.
    # The sandbox keys execution state off the conversation thread id, so each
    # chat thread gets its own notebook session.
    thread_config = config.get("configurable", {})
    session_token = thread_config.get("thread_id", "test")
    headers = {
        'accept': 'application/json',
        'Content-Type': 'application/json'
    }
    data = {
        "session_token": session_token,
        "code": code
    }
    try:
        # BUG FIX: requests.post without a timeout can hang the agent forever if
        # the sandbox stalls; bound the wait instead.
        response = requests.post(
            f'{API_URL}/v0/execute',
            headers=headers,
            json=data,
            timeout=120
        )
        if response.status_code != 200:
            return (
                f"Error: Request failed with status code {response.status_code}. Response: {response.text}",
                None
            )
        # Get the response JSON
        response_json = response.json()
        # Extract artifacts (plots, maps, ...) so they can be shipped to the UI
        # separately from the textual result.
        artifacts_data = response_json.get("artifacts_data", {})
        # Create a clean summary without the (potentially large) artifact payloads.
        execution_response = {
            "status": response_json.get("status"),
            "text": response_json.get("text"),
            "error_message": response_json.get("error_message"),
            "artifacts": response_json.get("artifacts")
        }
        return (
            f"Execution completed successfully: {json.dumps(execution_response)}",
            {"artifacts_data": artifacts_data} if artifacts_data else None
        )
    except Exception as e:
        # Best-effort tool: surface the failure text to the agent rather than
        # crashing the streaming loop.
        return (f"Error executing code: {str(e)}", None)
# Example snippet injected into the system prompt that shows the model how to
# persist yfinance data between tool calls (variables do not survive across
# sandbox executions, so data must round-trip through files).
save_yf_prompt = """
# Save downloaded data
tickers = yf.Tickers('MSFT AAPL')
data = tickers.history(period='6mo')
data.to_pickle('stock_data.pkl')
# Load it back later
data = pd.read_pickle('stock_data.pkl')
"""
# Per-thread conversation checkpoints (keyed by thread_id in the config).
memory = MemorySaver()
model = ChatOpenAI(model="gpt-4o", streaming=True)
# System prompt template. {collection_files} and {messages} are filled at
# invoke time by state_modifier; everything else (save_yf_prompt, today's
# date, yf_docs) is interpolated once at import time via the f-string.
# FIX: corrected user-visible typos in the prompt text ("taks" -> "task",
# "Beutifully" -> "beautifully", "an jupyter" -> "a Jupyter", "Format you" ->
# "Format your") — these reach the model verbatim.
prompt = ChatPromptTemplate.from_messages([
    ("system", f"You are a Data Visualization assistant. You have access to a Jupyter notebook with access to internet for python code execution.\
 Your task is to assist users with your data analysis and visualization expertise. Use only Plotly for creating visualizations and charts (Matplotlib is not available). Generated artifacts\
 are automatically rendered in the UI. Variables do not persist across tool calls, hence save any data to current directory you want to use in the next tool call to a file.(use file_name and do not add file path, as you have only permission to edit current folder) {save_yf_prompt} Today's date is \
{datetime.now().strftime('%Y-%m-%d')}. Format your responses beautifully using markdown tables, paragraphs, lists etc. Saved files are not accessible to user. The current folder contains the following files: {{collection_files}} {yf_docs}"),
    ("placeholder", "{messages}"),
])
def state_modifier(state) -> list[BaseMessage]:
    """Build the message list handed to the model on each agent step.

    Renders the system prompt, prepends it to the conversation, and trims the
    history so the context stays bounded. Falls back to the raw message list
    if anything goes wrong.
    """
    # TODO(review): collection_files is hard-coded to "None"; wire in the real
    # listing of the sandbox working directory when available.
    collection_files = "None"
    try:
        formatted_prompt = prompt.invoke({
            "collection_files": collection_files,
            "messages": state["messages"]
        })
        # BUG FIX: trim_messages expects a sequence of BaseMessage, but
        # prompt.invoke returns a ChatPromptValue. Previously this raised and
        # the except fallback silently dropped the system prompt every turn —
        # convert to messages first.
        return trim_messages(
            formatted_prompt.to_messages(),
            # NOTE: token_counter=len counts *messages*, not tokens, so
            # max_tokens=16000 caps the history at 16000 messages. Pass the
            # model as the counter to bound actual token usage.
            token_counter=len,
            max_tokens=16000,
            strategy="last",
            start_on="human",
            include_system=True,
            allow_partial=False,
        )
    except Exception as e:
        # Best effort: never block the agent on prompt formatting problems.
        print(f"Error in state modifier: {str(e)}")
        return state["messages"]
# Create the agent with the Python execution tool.
# ReAct-style agent: gpt-4o plus the remote execute_python tool, with
# per-thread checkpointing (memory) and prompt rendering/trimming applied on
# every step via state_modifier.
agent = create_react_agent(
    model,
    tools=[execute_python],
    checkpointer=memory,
    state_modifier=state_modifier,
)
class ChatInput(BaseModel):
    """Request body for the chat endpoint."""
    # The user message to send to the agent.
    message: str
    # Conversation id; when omitted, a fresh UUID is generated per request.
    thread_id: Optional[str] = None
# BUG FIX: the endpoint had no route decorator, so it was never registered
# with FastAPI and the chat UI could not reach it.
@app.post("/chat")
async def chat(input_data: ChatInput):
    """Stream the agent's reply to one user message as server-sent events.

    Each event line is a JSON object whose "type" is "token" (model output
    chunk), "tool_start" or "tool_end". A thread_id is generated when the
    client does not supply one, so the checkpointer keeps per-conversation
    history.
    """
    thread_id = input_data.thread_id or str(uuid.uuid4())
    config = {
        "configurable": {
            "thread_id": thread_id
        }
    }
    input_message = HumanMessage(content=input_data.message)

    async def generate():
        async for event in agent.astream_events(
            {"messages": [input_message]},
            config,
            version="v2"
        ):
            kind = event["event"]
            if kind == "on_chat_model_stream":
                content = event["data"]["chunk"].content
                if content:
                    yield f"{json.dumps({'type': 'token', 'content': content})}\n"
            elif kind == "on_tool_start":
                tool_input = event['data'].get('input', '')
                yield f"{json.dumps({'type': 'tool_start', 'tool': event['name'], 'input': tool_input})}\n"
            elif kind == "on_tool_end":
                # BUG FIX: the old code called .content/.artifact on the ''
                # default when output was missing, raising AttributeError.
                output = event['data'].get('output')
                tool_output = getattr(output, 'content', None)
                artifact = getattr(output, 'artifact', None)
                artifact_output = artifact.get('artifacts_data') if artifact else None
                yield f"{json.dumps({'type': 'tool_end', 'tool': event['name'], 'output': tool_output, 'artifacts_data': artifact_output})}\n"

    return EventSourceResponse(
        generate(),
        media_type="text/event-stream"
    )
# BUG FIX: missing route decorator — without it the probe endpoint was never
# registered and health checks would 404.
@app.get("/health")
async def health_check():
    """Liveness probe: reports the process is up (no dependency checks)."""
    return {"status": "healthy"}
# Run the API directly; port 7860 is the port Hugging Face Spaces expects.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)