|
|
import os
from typing import List

import httpx
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
|
|
|
|
|
# FastAPI application instance; this metadata feeds the auto-generated
# OpenAPI/Swagger documentation at /docs.
app = FastAPI(
    title="Perplexity-like API",
    description="An API that uses web search to answer questions with citations.",
    version="1.0.0"
)
|
|
|
|
|
|
|
|
# Upstream service endpoints.
TYPEGPT_API_URL = "https://api.typegpt.net/v1/chat/completions"
# SECURITY: this API key used to be hard-coded only; prefer supplying it via
# the TYPEGPT_API_KEY environment variable. The old literal is kept solely as
# a backward-compatible fallback and should be rotated and removed.
TYPEGPT_API_KEY = os.environ.get(
    "TYPEGPT_API_KEY",
    "sk-oPdaZC7n1JlDq0sJ5NSSyHe7sYaeAXeEuj0wX4Lk8hlOGPF8",
)
SEARCH_API_URL = "https://searchapi.snapzion.com/search"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# System prompt sent with every LLM request. It constrains the model to answer
# ONLY from the supplied search results and to cite sources in `[n]` format,
# matching the numbered "Source [n]" context built by the endpoint below.
SYSTEM_PROMPT = """
You are an expert AI research assistant. Your primary goal is to provide accurate, comprehensive, and helpful answers based ONLY on the provided search results.

Instructions:
1. Carefully analyze the user's query and the provided search results.
2. Synthesize an answer directly from the information found in the search results.
3. For every statement or piece of information you provide, you MUST cite the corresponding search result number in the format `[<number>]`.
4. If multiple sources support a statement, you can cite them like `[1, 2]`.
5. If the search results do not contain enough information to answer the query, you must explicitly state that you could not find the information in the provided context.
6. Do not use any prior knowledge or information outside of the provided search results.
7. Structure your response in a clear and easy-to-read format. Start with a direct answer, followed by a more detailed explanation.
"""
|
|
|
|
|
|
|
|
|
|
|
class ChatMessage(BaseModel):
    """A single chat message in OpenAI-style format."""

    # Speaker role, e.g. "user", "assistant", or "system".
    role: str
    # The message text.
    content: str
|
|
|
|
|
class ChatCompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions."""

    # Full conversation history; the endpoint requires the last entry to have
    # role "user" and searches the web based on its content.
    # NOTE(review): `example=` is pydantic-v1 style (v2 prefers `examples=`);
    # confirm the installed pydantic version before changing it.
    messages: List[ChatMessage] = Field(..., example=[{"role": "user", "content": "What are the benefits of learning Python?"}])
    # Model name forwarded to the LLM backend.
    model: str = "gpt-4.1-mini"
|
|
|
|
|
class Choice(BaseModel):
    """Wraps one generated assistant message, mirroring the OpenAI schema."""

    message: ChatMessage
|
|
|
|
|
class ChatCompletionResponse(BaseModel):
    """Response body: the generated choices plus the search results used."""

    choices: List[Choice]
    # Raw hits returned by the search API, echoed back so clients can resolve
    # the `[n]` citations in the answer to actual URLs.
    search_results: List[dict]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def chat_completions(request: ChatCompletionRequest):
    """
    Answer the user's latest message using live web search results.

    Takes a user's chat history, performs a web search based on the latest
    query, and uses the TypeGPT model to generate a factual, cited response.

    Raises:
        HTTPException(400): if the last message is not from the 'user'.
        HTTPException(502): if a backend call fails or returns an error status.
        HTTPException(500): if a backend response cannot be parsed.
    """
    if not request.messages or request.messages[-1].role != "user":
        raise HTTPException(status_code=400, detail="Invalid request. The last message must be from the 'user'.")

    user_query = request.messages[-1].content

    async with httpx.AsyncClient(timeout=30.0) as client:
        search_results = await _fetch_search_results(client, user_query)
        context = _build_context(search_results)

        final_prompt = f"""
**Search Results:**
{context}

**User Query:** "{user_query}"

Please provide a comprehensive answer based on the search results above, following all instructions.
"""

        answer_content = await _generate_answer(client, final_prompt, request.model)

    response_message = ChatMessage(role="assistant", content=answer_content)
    response_choice = Choice(message=response_message)

    return ChatCompletionResponse(choices=[response_choice], search_results=search_results)


async def _fetch_search_results(client: httpx.AsyncClient, query: str) -> list:
    """Call the web-search API and return its JSON result list, or raise HTTPException."""
    try:
        search_response = await client.get(SEARCH_API_URL, params={"keywords": query})
        search_response.raise_for_status()
        search_results = search_response.json()
    except httpx.HTTPError as e:
        # httpx.HTTPError covers both transport failures (RequestError) and
        # non-2xx responses (HTTPStatusError, raised by raise_for_status()).
        # The original code caught only RequestError, so an upstream 4xx/5xx
        # surfaced as a generic 500 instead of a 502.
        raise HTTPException(status_code=502, detail=f"Error calling the search API: {e}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to process search results: {e}")
    if not isinstance(search_results, list):
        # Downstream code slices the results and reads dict keys from each
        # item, so a non-list payload (e.g. an error object) is rejected here
        # with a clear 502 instead of an opaque 500.
        raise HTTPException(status_code=502, detail="Search API returned an unexpected payload.")
    return search_results


def _build_context(search_results: list) -> str:
    """Format the top search results into the numbered source list cited by the LLM."""
    # Cap at 7 sources to bound prompt size; numbering starts at [1] to match
    # the citation format demanded by SYSTEM_PROMPT.
    parts = [
        f"Source [{i+1}]:\nTitle: {result.get('title', 'N/A')}\nSnippet: {result.get('snippet', '')}\nURL: {result.get('url', 'N/A')}\n\n"
        for i, result in enumerate(search_results[:7])
    ]
    return "".join(parts)


async def _generate_answer(client: httpx.AsyncClient, prompt: str, model: str) -> str:
    """Send the grounded prompt to the TypeGPT chat API and return the answer text."""
    headers = {
        "Authorization": f"Bearer {TYPEGPT_API_KEY}",
        "Content-Type": "application/json",
    }
    payload = {
        # Honor the model requested by the client; the original hard-coded
        # "gpt-4.1-mini" here and silently ignored request.model. The request
        # default is still "gpt-4.1-mini", so existing callers are unaffected.
        "model": model,
        "messages": [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": prompt},
        ],
    }
    try:
        llm_response = await client.post(TYPEGPT_API_URL, headers=headers, json=payload)
        # As above: catch HTTPError so an upstream error status maps to 502;
        # previously HTTPStatusError from raise_for_status() was unhandled.
        llm_response.raise_for_status()
        llm_data = llm_response.json()
        return llm_data['choices'][0]['message']['content']
    except httpx.HTTPError as e:
        raise HTTPException(status_code=502, detail=f"Error calling language model API: {e}")
    except (KeyError, IndexError) as e:
        raise HTTPException(status_code=500, detail=f"Invalid response structure from language model API: {e}")
|
|
|
|
|
if __name__ == "__main__":
    # Run a development server when this file is executed directly.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)