rkihacker committed · Commit a388535 · verified · 1 Parent(s): 363d93e

Update main.py

Files changed (1):
  1. main.py +138 -71
main.py CHANGED
@@ -1,25 +1,22 @@
 import httpx
+import json
+import time
+import uuid
 from fastapi import FastAPI, HTTPException
+from fastapi.responses import StreamingResponse
 from pydantic import BaseModel, Field
-from typing import List
-
-app = FastAPI(
-    title="Perplexity-like API",
-    description="An API that uses web search to answer questions with citations.",
-    version="1.0.0"
-)
+from typing import List, Optional
 
 # --- API Configuration ---
+# It's recommended to use environment variables for sensitive data in production.
 TYPEGPT_API_URL = "https://api.typegpt.net/v1/chat/completions"
-TYPEGPT_API_KEY = "sk-oPdaZC7n1JlDq0sJ5NSSyHe7sYaeAXeEuj0wX4Lk8hlOGPF8"
+TYPEGPT_API_KEY = "sk-oPdaZC7n1JlDq0sJ5NSSyHe7sYaeAXeEuj0wX4Lk8hlOGPF8" # Replace with your actual key
 SEARCH_API_URL = "https://superapis-bing.hf.space/search"
 
-
 # --- System Prompt ---
 # This prompt guides the AI to behave like a factual research assistant.
 SYSTEM_PROMPT = """
 You are an expert AI research assistant. Your primary goal is to provide accurate, comprehensive, and helpful answers based ONLY on the provided search results.
-
 Instructions:
 1. Carefully analyze the user's query and the provided search results.
 2. Synthesize an answer directly from the information found in the search results.
@@ -30,93 +27,163 @@ Instructions:
 7. Structure your response in a clear and easy-to-read format. Start with a direct answer, followed by a more detailed explanation.
 """
 
-# --- Pydantic Models for API Request/Response ---
+# --- Pydantic Models ---
 
+# For incoming requests
 class ChatMessage(BaseModel):
     role: str
     content: str
 
 class ChatCompletionRequest(BaseModel):
     messages: List[ChatMessage] = Field(..., example=[{"role": "user", "content": "What are the benefits of learning Python?"}])
-    model: str = "gpt-4.1-mini" # Model is fixed but included for compatibility
+    model: str = "perplexity-like" # Model name can be customized
+    stream: bool = Field(default=False, description="Enable streaming response")
+
+# For outgoing streaming responses (OpenAI compatible)
+class ChatDelta(BaseModel):
+    content: Optional[str] = None
+    role: Optional[str] = None
+
+class ChatCompletionStreamChoice(BaseModel):
+    delta: ChatDelta
+    index: int = 0
+    finish_reason: Optional[str] = None
+
+class ChatCompletionStreamResponse(BaseModel):
+    id: str = Field(default_factory=lambda: f"chatcmpl-{uuid.uuid4().hex}")
+    object: str = "chat.completion.chunk"
+    created: int = Field(default_factory=lambda: int(time.time()))
+    model: str = "perplexity-like"
+    choices: List[ChatCompletionStreamChoice]
+
+# --- FastAPI App Initialization ---
+app = FastAPI(
+    title="Perplexity-like API",
+    description="An API that uses web search to answer questions with citations, supporting streaming.",
+    version="2.0.0"
+)
 
-class Choice(BaseModel):
-    message: ChatMessage
 
-class ChatCompletionResponse(BaseModel):
-    choices: List[Choice]
-    search_results: List[dict]
+# --- Streaming Logic ---
+async def stream_llm_response(payload: dict):
+    """
+    An async generator that streams the response from the language model.
+    """
+    start_time = time.time()
+    try:
+        async with httpx.AsyncClient(timeout=60.0) as client:
+            headers = {
+                "Authorization": f"Bearer {TYPEGPT_API_KEY}",
+                "Content-Type": "application/json"
+            }
+            async with client.stream("POST", TYPEGPT_API_URL, headers=headers, json=payload) as response:
+                # Check for errors from the upstream API
+                if response.status_code != 200:
+                    error_content = await response.aread()
+                    raise HTTPException(
+                        status_code=response.status_code,
+                        detail=f"Error from language model API: {error_content.decode()}"
+                    )
+
+                # Process the stream line by line
+                async for line in response.aiter_lines():
+                    if line.startswith("data: "):
+                        data_str = line.removeprefix("data: ")
+                        if data_str.strip() == "[DONE]":
+                            break
+                        try:
+                            chunk = json.loads(data_str)
+                            delta_content = chunk["choices"][0]["delta"].get("content")
+                            if delta_content:
+                                # Create a streaming-compliant response chunk
+                                stream_choice = ChatCompletionStreamChoice(delta=ChatDelta(content=delta_content))
+                                stream_response = ChatCompletionStreamResponse(choices=[stream_choice])
+                                yield f"data: {stream_response.model_dump_json()}\n\n"
+                        except (json.JSONDecodeError, KeyError, IndexError):
+                            # Skip malformed lines
+                            continue
+
+    except httpx.RequestError as e:
+        # Handle network-related errors during the streaming request
+        error_message = f"HTTP Request Error during streaming: {e}"
+        stream_choice = ChatCompletionStreamChoice(delta=ChatDelta(content=f"\n\nERROR: {error_message}"))
+        stream_response = ChatCompletionStreamResponse(choices=[stream_choice])
+        yield f"data: {stream_response.model_dump_json()}\n\n"
+
+    except Exception as e:
+        # Handle other unexpected errors
+        error_message = f"An unexpected error occurred during streaming: {e}"
+        stream_choice = ChatCompletionStreamChoice(delta=ChatDelta(content=f"\n\nERROR: {error_message}"))
+        stream_response = ChatCompletionStreamResponse(choices=[stream_choice])
+        yield f"data: {stream_response.model_dump_json()}\n\n"
+
+    # Send the final chunk with finish_reason
+    finally:
+        finish_time = time.time()
+        print(f"Stream finished in {finish_time - start_time:.2f} seconds.")
+        final_choice = ChatCompletionStreamChoice(delta=ChatDelta(), finish_reason="stop")
+        final_response = ChatCompletionStreamResponse(choices=[final_choice])
+        yield f"data: {final_response.model_dump_json()}\n\n"
+        yield "data: [DONE]\n\n"
 
 
 # --- API Endpoint ---
-
-@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
+@app.post("/v1/chat/completions")
 async def chat_completions(request: ChatCompletionRequest):
     """
-    Takes a user's chat history, performs a web search based on the latest query,
-    and uses the TypeGPT model to generate a factual, cited response.
+    Takes a user's query, performs a web search, and streams a factual,
+    cited response from a language model.
     """
     if not request.messages or request.messages[-1].role != "user":
         raise HTTPException(status_code=400, detail="Invalid request. The last message must be from the 'user'.")
 
    user_query = request.messages[-1].content
 
-    async with httpx.AsyncClient(timeout=30.0) as client:
-        # 1. Perform a web search
-        try:
-            search_params = {"keywords": user_query}
+    # 1. Perform a web search
+    try:
+        async with httpx.AsyncClient(timeout=30.0) as client:
+            search_params = {"keywords": user_query, "max_results": 7}
             search_response = await client.get(SEARCH_API_URL, params=search_params)
             search_response.raise_for_status()
             search_results = search_response.json()
-        except httpx.RequestError as e:
-            raise HTTPException(status_code=502, detail=f"Error calling the search API: {e}")
-        except Exception as e:
-            raise HTTPException(status_code=500, detail=f"Failed to process search results: {e}")
-
-        # 2. Format search results into a context for the language model
-        context = ""
-        for i, result in enumerate(search_results[:7]): # Use top 7 results for richer context
-            context += f"Source [{i+1}]:\nTitle: {result.get('title', 'N/A')}\nSnippet: {result.get('snippet', '')}\nURL: {result.get('url', 'N/A')}\n\n"
-
-        # 3. Construct the prompt for the language model
-        final_prompt = f"""
-        **Search Results:**
-        {context}
-
-        **User Query:** "{user_query}"
+    except httpx.RequestError as e:
+        raise HTTPException(status_code=502, detail=f"Error calling the search API: {e}")
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Failed to process search results: {e}")
+
+    # 2. Format search results into a context for the language model
+    # Using the 'description' field as per the new OpenAPI spec
+    context = ""
+    for i, result in enumerate(search_results):
+        context += f"Source [{i+1}]:\nTitle: {result.get('title', 'N/A')}\nDescription: {result.get('description', '')}\nURL: {result.get('url', 'N/A')}\n\n"
+
+    # 3. Construct the prompt for the language model
+    final_prompt = f"""
+    **Search Results:**
+    {context}
+    **User Query:** "{user_query}"
+    Please provide a comprehensive answer based on the search results above, following all instructions.
+    """
 
-        Please provide a comprehensive answer based on the search results above, following all instructions.
-        """
+    # 4. Prepare the payload for the TypeGPT language model
+    llm_payload = {
+        "model": "gpt-4.1-mini",
+        "messages": [
+            {"role": "system", "content": SYSTEM_PROMPT},
+            {"role": "user", "content": final_prompt}
+        ],
+        "stream": True # Enable streaming from the backing LLM
+    }
 
-        # 4. Get the response from the TypeGPT language model
-        try:
-            headers = {
-                "Authorization": f"Bearer {TYPEGPT_API_KEY}",
-                "Content-Type": "application/json"
-            }
-            # The payload now includes the system prompt and the user prompt with context
-            payload = {
-                "model": "gpt-4.1-mini",
-                "messages": [
-                    {"role": "system", "content": SYSTEM_PROMPT},
-                    {"role": "user", "content": final_prompt}
-                ]
-            }
-            llm_response = await client.post(TYPEGPT_API_URL, headers=headers, json=payload)
-            llm_response.raise_for_status()
-            llm_data = llm_response.json()
-            answer_content = llm_data['choices'][0]['message']['content']
-        except httpx.RequestError as e:
-            raise HTTPException(status_code=502, detail=f"Error calling language model API: {e}")
-        except (KeyError, IndexError) as e:
-            raise HTTPException(status_code=500, detail=f"Invalid response structure from language model API: {e}")
-
-        # 5. Format the final response
-        response_message = ChatMessage(role="assistant", content=answer_content)
-        response_choice = Choice(message=response_message)
-
-        return ChatCompletionResponse(choices=[response_choice], search_results=search_results)
+    # 5. Return the streaming response
+    return StreamingResponse(stream_llm_response(llm_payload), media_type="text/event-stream")
 
+# --- Main execution ---
 if __name__ == "__main__":
     import uvicorn
+    # To run this app:
+    # 1. Save the code as main.py
+    # 2. Install necessary packages: pip install fastapi "uvicorn[standard]" httpx
+    # 3. Run in your terminal: uvicorn main:app --reload
+    # 4. Access the interactive docs at http://127.0.0.1:8000/docs
     uvicorn.run(app, host="0.0.0.0", port=8000)
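
The configuration comment added in this commit recommends environment variables for sensitive data. A minimal sketch of that approach, assuming the key is exported under the name TYPEGPT_API_KEY (the variable name is an assumption, not part of the commit):

import os

# Hypothetical replacement for the hard-coded key in main.py; reads the
# key from the environment and fails fast if it is missing.
TYPEGPT_API_KEY = os.environ.get("TYPEGPT_API_KEY", "")
if not TYPEGPT_API_KEY:
    raise RuntimeError("Set the TYPEGPT_API_KEY environment variable before starting the server.")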
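Note that after this commit the endpoint always returns Server-Sent Events: the request's stream field defaults to False but is never checked, so clients must parse data: lines themselves. A minimal sketch of a consumer, assuming the server from this commit is running locally on port 8000:

import json
import httpx

payload = {
    "messages": [{"role": "user", "content": "What are the benefits of learning Python?"}],
}

# Stream the SSE response and print content deltas as they arrive.
with httpx.stream("POST", "http://127.0.0.1:8000/v1/chat/completions", json=payload, timeout=60.0) as response:
    for line in response.iter_lines():
        if not line.startswith("data: "):
            continue
        data = line.removeprefix("data: ")
        if data.strip() == "[DONE]":
            break
        chunk = json.loads(data)
        delta = chunk["choices"][0]["delta"]
        if delta.get("content"):
            print(delta["content"], end="", flush=True)

The final chunk carries finish_reason "stop" with an empty delta, so the content check above skips it cleanly before the [DONE] sentinel ends the loop.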