Spaces:
Runtime error
Runtime error
Update presentation_api.py
Browse files- presentation_api.py +31 -15
presentation_api.py
CHANGED
|
@@ -22,28 +22,38 @@ router = APIRouter(
|
|
| 22 |
)
|
| 23 |
|
| 24 |
|
| 25 |
-
@tool
|
| 26 |
def plan(input: dict) -> str:
|
| 27 |
-
"""Create a presentation plan with numbered slides and their descriptions.
|
|
|
|
| 28 |
Args:
|
| 29 |
-
input: Dictionary containing presentation details
|
| 30 |
-
Eg: {"1":"title page for ..", "2":"introduction .."}
|
| 31 |
-
Returns:
|
| 32 |
-
A dictionary with slide numbers as keys and descriptions as values
|
| 33 |
"""
|
| 34 |
return f"Plan created"
|
| 35 |
|
| 36 |
-
@tool
|
| 37 |
def create_slide(slideno: int, content: str) -> str:
|
| 38 |
-
"""Create a single presentation slide.
|
|
|
|
| 39 |
Args:
|
| 40 |
-
slideno: The slide number to create
|
| 41 |
-
content: The content for the slide
|
| 42 |
-
Returns:
|
| 43 |
-
Confirmation of slide creation
|
| 44 |
"""
|
| 45 |
return f"slide {slideno} created"
|
| 46 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
memory = MemorySaver()
|
| 48 |
model = ChatOpenAI(model="gpt-4o-mini", streaming=True)
|
| 49 |
prompt = ChatPromptTemplate.from_messages([
|
|
@@ -78,7 +88,7 @@ def state_modifier(state) -> list[BaseMessage]:
|
|
| 78 |
# Create the agent with presentation tools
|
| 79 |
agent = create_react_agent(
|
| 80 |
model,
|
| 81 |
-
tools=[plan, create_slide],
|
| 82 |
checkpointer=memory,
|
| 83 |
state_modifier=state_modifier,
|
| 84 |
)
|
|
@@ -114,11 +124,11 @@ async def chat(input_data: ChatInput):
|
|
| 114 |
yield f"{json.dumps({'type': 'token', 'content': content})}\n"
|
| 115 |
|
| 116 |
elif kind == "on_tool_start":
|
| 117 |
-
tool_input = event['data'].get('input', '')
|
| 118 |
yield f"{json.dumps({'type': 'tool_start', 'tool': event['name'], 'input': tool_input})}\n"
|
| 119 |
|
| 120 |
elif kind == "on_tool_end":
|
| 121 |
-
tool_output = event['data'].get('output', '')
|
| 122 |
yield f"{json.dumps({'type': 'tool_end', 'tool': event['name'], 'output': tool_output})}\n"
|
| 123 |
|
| 124 |
return EventSourceResponse(
|
|
@@ -130,3 +140,9 @@ async def chat(input_data: ChatInput):
|
|
| 130 |
async def health_check():
|
| 131 |
return {"status": "healthy"}
|
| 132 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
)
|
| 23 |
|
| 24 |
|
| 25 |
+
@tool(parse_docstring=True)
def plan(input: dict) -> str:
    """Create a presentation plan with numbered slides and their descriptions. Returns a confirmation message indicating that the plan has been created.

    Args:
        input: Dictionary containing presentation details, Example: {"1": "title page for ..", "2": "introduction .."}
    """
    # NOTE(review): the tool only acknowledges the plan; the dict itself is not
    # stored here — presumably the agent re-reads it from the message history.
    # Plain string literal: the original f-prefix had no placeholders to fill.
    return "Plan created"
|
| 33 |
|
| 34 |
+
@tool(parse_docstring=True)
def create_slide(slideno: int, content: str) -> str:
    """Create a single presentation slide. Returns a confirmation message indicating that the slide has been created.

    Args:
        slideno: The slide number to create.
        content: The content for the slide.
    """
    # Build the acknowledgement once, then hand it back to the agent loop.
    # (The docstring above is runtime metadata: parse_docstring=True turns it
    # into the tool's schema, so it is reproduced verbatim.)
    confirmation = f"slide {slideno} created"
    return confirmation
|
| 43 |
|
| 44 |
+
@tool(parse_docstring=True)
def execute_python(expression: str) -> str:
    """Execute a python mathematic expression. Returns the result of the expression or an error message if execution fails.

    Args:
        expression: The python expression to execute.
    """
    # SECURITY: the original called eval() on model-generated text, which allows
    # arbitrary code execution ("__import__('os').system(...)", etc.). Instead we
    # walk the parsed AST and permit ONLY numeric literals and arithmetic
    # operators, preserving the original success/error message formats.
    import ast
    import operator

    # Whitelisted operator nodes -> their pure-arithmetic implementations.
    _ops = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.FloorDiv: operator.floordiv,
        ast.Mod: operator.mod,
        ast.Pow: operator.pow,
        ast.USub: operator.neg,
        ast.UAdd: operator.pos,
    }

    def _eval_node(node):
        # Recursively evaluate a node; anything outside the whitelist raises.
        if isinstance(node, ast.Expression):
            return _eval_node(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _ops:
            return _ops[type(node.op)](_eval_node(node.left), _eval_node(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _ops:
            return _ops[type(node.op)](_eval_node(node.operand))
        raise ValueError(f"unsupported expression element: {type(node).__name__}")

    try:
        result = _eval_node(ast.parse(expression, mode="eval"))
        return f"The result of the expression is {result}"
    except Exception as e:
        # Same best-effort contract as before: report, never raise to the agent.
        return f"Error executing the expression: {str(e)}"
|
| 56 |
+
|
| 57 |
memory = MemorySaver()
|
| 58 |
model = ChatOpenAI(model="gpt-4o-mini", streaming=True)
|
| 59 |
prompt = ChatPromptTemplate.from_messages([
|
|
|
|
| 88 |
# Create the agent with presentation tools.
# NOTE(review): `plan`, `create_slide`, and `execute_python` are the @tool
# functions defined above; `memory` (MemorySaver) persists conversation state
# per thread, and `state_modifier` shapes the message list before each model
# call — all bound at module import time, so tool edits require a restart.
agent = create_react_agent(
    model,
    tools=[plan, create_slide, execute_python],
    checkpointer=memory,
    state_modifier=state_modifier,
)
|
|
|
|
| 124 |
yield f"{json.dumps({'type': 'token', 'content': content})}\n"
|
| 125 |
|
| 126 |
elif kind == "on_tool_start":
|
| 127 |
+
tool_input = event['data'].get('input', '').pretty_repr()
|
| 128 |
yield f"{json.dumps({'type': 'tool_start', 'tool': event['name'], 'input': tool_input})}\n"
|
| 129 |
|
| 130 |
elif kind == "on_tool_end":
|
| 131 |
+
tool_output = event['data'].get('output', '').pretty_repr()
|
| 132 |
yield f"{json.dumps({'type': 'tool_end', 'tool': event['name'], 'output': tool_output})}\n"
|
| 133 |
|
| 134 |
return EventSourceResponse(
|
|
|
|
| 140 |
async def health_check():
    """Liveness probe: report that the service is up."""
    status_payload = {"status": "healthy"}
    return status_payload
|
| 142 |
|
| 143 |
+
|
| 144 |
+
# Wire the router into a standalone FastAPI app so this file is runnable
# directly (required for the Spaces runtime, which executes the module).
app = FastAPI()
app.include_router(router)

# Script entry point: bind to all interfaces on port 8000.
# NOTE(review): host 0.0.0.0 is intentional for containerized deployment.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
|