rkihacker committed on
Commit 7c5229e · verified · 1 Parent(s): c67aeae

Create main.py

Files changed (1)
main.py +122 -0
main.py ADDED
@@ -0,0 +1,122 @@
import httpx
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
from typing import List

app = FastAPI(
    title="Perplexity-like API",
    description="An API that uses web search to answer questions with citations.",
    version="1.0.0"
)

# --- API Configuration ---
TYPEGPT_API_URL = "https://api.typegpt.net/v1/chat/completions"
TYPEGPT_API_KEY = "sk-oPdaZC7n1JlDq0sJ5NSSyHe7sYaeAXeEuj0wX4Lk8hlOGPF8"
SEARCH_API_URL = "https://searchapi.snapzion.com/search"


# --- System Prompt ---
# This prompt guides the AI to behave like a factual research assistant.
SYSTEM_PROMPT = """
You are an expert AI research assistant. Your primary goal is to provide accurate, comprehensive, and helpful answers based ONLY on the provided search results.

Instructions:
1. Carefully analyze the user's query and the provided search results.
2. Synthesize an answer directly from the information found in the search results.
3. For every statement or piece of information you provide, you MUST cite the corresponding search result number in the format `[<number>]`.
4. If multiple sources support a statement, you can cite them like `[1, 2]`.
5. If the search results do not contain enough information to answer the query, you must explicitly state that you could not find the information in the provided context.
6. Do not use any prior knowledge or information outside of the provided search results.
7. Structure your response in a clear and easy-to-read format. Start with a direct answer, followed by a more detailed explanation.
"""

# --- Pydantic Models for API Request/Response ---

class ChatMessage(BaseModel):
    role: str
    content: str

class ChatCompletionRequest(BaseModel):
    messages: List[ChatMessage] = Field(..., example=[{"role": "user", "content": "What are the benefits of learning Python?"}])
    model: str = "gpt-4.1-mini"  # Model is fixed but included for compatibility

class Choice(BaseModel):
    message: ChatMessage

class ChatCompletionResponse(BaseModel):
    choices: List[Choice]
    search_results: List[dict]


# --- API Endpoint ---

@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def chat_completions(request: ChatCompletionRequest):
    """
    Takes a user's chat history, performs a web search based on the latest query,
    and uses the TypeGPT model to generate a factual, cited response.
    """
    if not request.messages or request.messages[-1].role != "user":
        raise HTTPException(status_code=400, detail="Invalid request. The last message must be from the 'user'.")

    user_query = request.messages[-1].content

    async with httpx.AsyncClient(timeout=30.0) as client:
        # 1. Perform a web search
        try:
            search_params = {"keywords": user_query}
            search_response = await client.get(SEARCH_API_URL, params=search_params)
            search_response.raise_for_status()
            search_results = search_response.json()
        except httpx.RequestError as e:
            raise HTTPException(status_code=502, detail=f"Error calling the search API: {e}")
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Failed to process search results: {e}")

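        # The search response is assumed here to be a JSON list of result objects
        # exposing 'title', 'snippet', and 'url' keys; the slicing and .get()
        # lookups in step 2 below rely on that shape.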
        # 2. Format search results into a context for the language model
        context = ""
        for i, result in enumerate(search_results[:7]):  # Use top 7 results for richer context
            context += f"Source [{i+1}]:\nTitle: {result.get('title', 'N/A')}\nSnippet: {result.get('snippet', '')}\nURL: {result.get('url', 'N/A')}\n\n"

        # 3. Construct the prompt for the language model
        final_prompt = f"""
**Search Results:**
{context}

**User Query:** "{user_query}"

Please provide a comprehensive answer based on the search results above, following all instructions.
"""

        # 4. Get the response from the TypeGPT language model
        try:
            headers = {
                "Authorization": f"Bearer {TYPEGPT_API_KEY}",
                "Content-Type": "application/json"
            }
            # The payload includes the system prompt and the user prompt with context
            payload = {
                "model": "gpt-4.1-mini",
                "messages": [
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": final_prompt}
                ]
            }
            llm_response = await client.post(TYPEGPT_API_URL, headers=headers, json=payload)
            llm_response.raise_for_status()
            llm_data = llm_response.json()
            answer_content = llm_data['choices'][0]['message']['content']
        except httpx.RequestError as e:
            raise HTTPException(status_code=502, detail=f"Error calling language model API: {e}")
        except (KeyError, IndexError) as e:
            raise HTTPException(status_code=500, detail=f"Invalid response structure from language model API: {e}")

    # 5. Format the final response
    response_message = ChatMessage(role="assistant", content=answer_content)
    response_choice = Choice(message=response_message)

    return ChatCompletionResponse(choices=[response_choice], search_results=search_results)

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
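
For reference, the new endpoint can be exercised with a short client script once the server is running (python main.py starts uvicorn on 0.0.0.0:8000). The sketch below is illustrative only; the localhost URL is an assumption based on that uvicorn.run call, and the printed fields simply mirror the ChatCompletionResponse model defined above.

import httpx

# Illustrative client for the /v1/chat/completions endpoint in main.py.
# Assumes the server is reachable at http://localhost:8000 (see uvicorn.run above).
payload = {
    "model": "gpt-4.1-mini",
    "messages": [{"role": "user", "content": "What are the benefits of learning Python?"}],
}

resp = httpx.post("http://localhost:8000/v1/chat/completions", json=payload, timeout=60.0)
resp.raise_for_status()
data = resp.json()

# 'choices' holds the cited answer; 'search_results' echoes the raw web results.
print(data["choices"][0]["message"]["content"])
print(f"{len(data['search_results'])} search results attached")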