import asyncio
import logging
import os

import aiohttp
from bs4 import BeautifulSoup
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException, Query

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load configuration from the environment (.env file).
load_dotenv()
LLM_API_KEY = os.getenv("LLM_API_KEY")

if not LLM_API_KEY:
    raise RuntimeError("LLM_API_KEY must be set in a .env file.")
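
# Expected .env entry (the value shown is only a placeholder):
#   LLM_API_KEY=your-api-key-here
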
# Snapzion search endpoint and the browser-like headers sent with each query.
SNAPZION_API_URL = "https://search.snapzion.com/get-snippets"
SNAPZION_HEADERS = {
    'accept': '*/*',
    'accept-language': 'en-US,en;q=0.9',
    'content-type': 'application/json',
    'origin': 'https://search.snapzion.com',
    'priority': 'u=1, i',
    'referer': 'https://search.snapzion.com/docs',
    'sec-ch-ua': '"Chromium";v="140", "Not=A?Brand";v="24", "Google Chrome";v="140"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36',
}

# Browser-like headers used when scraping result pages, to reduce the chance
# of being blocked as a bot.
SCRAPING_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.9',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1',
}

# LLM endpoint and model used to generate the cited summaries.
LLM_API_URL = "https://api.inference.net/v1/chat/completions"
LLM_MODEL = "meta-llama/llama-3.1-8b-instruct/fp-8"

app = FastAPI(
    title="AI Search Snippets API (Snapzion)",
    description="Provides AI-generated summaries from Snapzion search results.",
    version="1.1.0",
)


async def call_snapzion_search(session: aiohttp.ClientSession, query: str) -> list:
    """Queries the Snapzion search API and returns its list of organic results."""
    try:
        async with session.post(SNAPZION_API_URL, headers=SNAPZION_HEADERS, json={"query": query}, timeout=15) as response:
            response.raise_for_status()
            data = await response.json()
            return data.get("organic_results", [])
    except Exception as e:
        logger.error(f"Snapzion API call failed: {e}")
        raise HTTPException(status_code=503, detail=f"Search service (Snapzion) failed: {e}")


async def scrape_url(session: aiohttp.ClientSession, url: str) -> str:
    """Asynchronously scrapes the visible text from a URL using browser-like headers."""
    if url.lower().endswith('.pdf'):
        return "Error: Content is a PDF, which cannot be scraped."
    try:
        async with session.get(url, headers=SCRAPING_HEADERS, timeout=10, ssl=False) as response:
            if response.status != 200:
                logger.warning(f"Failed to fetch {url}, status code: {response.status}")
                return f"Error: Failed to fetch with status {response.status}"
            html = await response.text()
            soup = BeautifulSoup(html, "html.parser")
            # Drop non-content tags before extracting text.
            for tag in soup(['script', 'style', 'nav', 'footer', 'header', 'aside']):
                tag.decompose()
            return " ".join(soup.stripped_strings)
    except Exception as e:
        logger.warning(f"Could not scrape {url}. Reason: {e}")
        return f"Error: Could not scrape. Reason: {e}"


async def get_ai_snippet(query: str, context: str, sources: list) -> str:
    """Asks the LLM for a concise answer grounded in the context, with numbered citations."""
    headers = {"Authorization": f"Bearer {LLM_API_KEY}", "Content-Type": "application/json"}
    source_list_str = "\n".join(f"[{i+1}] {source['title']}: {source['link']}" for i, source in enumerate(sources))
    prompt = f"""
Based *only* on the provided context, provide a concise, factual answer to the user's query. Cite every sentence with the corresponding source number(s), like `[1]` or `[2, 3]`.

Sources:
{source_list_str}

Context:
---
{context}
---

User Query: "{query}"

Answer with citations:
"""
    data = {"model": LLM_MODEL, "messages": [{"role": "user", "content": prompt}], "max_tokens": 500}
    async with aiohttp.ClientSession() as session:
        try:
            async with session.post(LLM_API_URL, headers=headers, json=data, timeout=45) as response:
                response.raise_for_status()
                result = await response.json()
                return result['choices'][0]['message']['content']
        except Exception as e:
            logger.error(f"LLM API call failed: {e}")
            raise HTTPException(status_code=502, detail=f"Failed to get response from LLM: {e}")


@app.get("/search")
async def ai_search(q: str = Query(..., min_length=3, description="The search query.")):
    """Searches Snapzion, scrapes the top results, and returns an AI summary with its sources."""
    async with aiohttp.ClientSession() as session:
        search_results = await call_snapzion_search(session, q)
        if not search_results:
            raise HTTPException(status_code=404, detail="Could not find any relevant sources for the query.")

        # Scrape the top five results concurrently.
        sources = search_results[:5]
        scrape_tasks = [scrape_url(session, source["link"]) for source in sources]
        scraped_contents = await asyncio.gather(*scrape_tasks)

        successful_scrapes = [content for content in scraped_contents if not content.startswith("Error:")]

        full_context = ""
        if successful_scrapes:
            logger.info(f"Successfully scraped {len(successful_scrapes)} out of {len(sources)} sources.")
            # Keep the original source numbering so citations still match the source list.
            full_context = "\n\n".join(
                f"Source [{i+1}] ({sources[i]['link']}):\n{scraped_contents[i]}"
                for i in range(len(sources)) if not scraped_contents[i].startswith("Error:")
            )
        else:
            logger.warning("All scraping attempts failed. Falling back to using API snippets for context.")
            full_context = "\n\n".join(
                f"Source [{i+1}] ({source['link']}):\n{source['snippet']}"
                for i, source in enumerate(sources)
            )

        if not full_context.strip():
            raise HTTPException(status_code=500, detail="Could not construct any context from sources or snippets.")

        ai_summary = await get_ai_snippet(q, full_context, sources)
        return {"ai_summary": ai_summary, "sources": sources}
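
# Example request, assuming the app is served locally on port 8000
# (see the __main__ block at the bottom of this file):
#   curl "http://127.0.0.1:8000/search?q=what+is+retrieval+augmented+generation"
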
@app.get("/")
def root():
    return {"message": "AI Search API is active. Use the /docs endpoint to test."}
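

# Local development entry point: a minimal sketch assuming uvicorn is
# installed (pip install uvicorn). Running `uvicorn your_module:app` from
# the command line works just as well.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="127.0.0.1", port=8000)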