# NOTE: repository-listing metadata preserved as a comment so this file parses:
# docs: add comprehensive docstrings to all Python files (commit 23a9367, author D3MI4N)
"""
LLM Agent Tool for Intelligent Surf Spot Analysis.
This module provides AI-powered reasoning and natural language generation
for surf spot recommendations. It demonstrates autonomous agent behavior
by analyzing surf conditions and generating human-like explanations.
The agent supports multiple LLM providers with intelligent fallbacks:
1. OpenAI GPT-4 (primary)
2. Anthropic Claude (secondary)
3. OpenRouter (multi-model access)
4. Rule-based reasoning (always available)
Key capabilities:
- Autonomous analysis of surf conditions
- Natural language explanation generation
- Safety-focused recommendations based on skill level
- Multi-factor reasoning about wave, wind, and swell
- Contextual advice for different surf scenarios
Example:
>>> agent = SurfLLMAgent()
>>> input_data = LLMAgentInput(
... user_location="Tarifa, Spain",
... user_preferences={"skill_level": "beginner"},
... surf_spots=evaluated_spots
... )
>>> result = await agent.run(input_data)
>>> print(result.reasoning) # AI-generated explanation
Author: Surf Spot Finder Team
License: MIT
"""
import os
import json
from typing import Dict, Any, List, Optional
from pydantic import BaseModel, Field
import httpx
import logging
# Load environment variables from a local .env file when python-dotenv is
# installed; silently skip otherwise so the module still imports without it.
try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    pass
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
class LLMAgentInput(BaseModel):
    """Input schema for the LLM agent tool.

    Attributes:
        user_location: User's location for contextual recommendations.
        user_preferences: Dict with skill_level, board_type, etc.
        surf_spots: List of evaluated spots with scores and conditions.
        reasoning_task: Type of analysis (default: "recommendation").
    """

    # Field(description=...) strings feed the generated JSON schema that is
    # exposed via create_llm_agent_tool(); do not change them casually.
    user_location: str = Field(description="User's location")
    user_preferences: Dict[str, Any] = Field(description="User surfing preferences")
    surf_spots: List[Dict[str, Any]] = Field(description="Evaluated surf spots with scores")
    reasoning_task: str = Field(default="recommendation", description="Type of reasoning task")
class LLMAgentOutput(BaseModel):
    """Output schema for LLM agent results.

    Attributes:
        success: Whether AI analysis completed successfully.
        summary: Brief recommendation summary (1-2 sentences).
        reasoning: Detailed AI analysis with explanations.
        recommendations: List of specific actionable advice.
        error: Error message if analysis failed.
    """

    success: bool
    summary: str = ""
    reasoning: str = ""
    # default_factory is the idiomatic pydantic way to declare an empty-list
    # default; it avoids the shared-mutable-default pitfall and lint warnings.
    recommendations: List[str] = Field(default_factory=list)
    error: str = ""
class SurfLLMAgent:
    """LLM-powered agent for surf spot recommendations and reasoning.

    Demonstrates autonomous decision making and natural language
    understanding. Providers are tried in a fixed preference order
    (OpenAI, Anthropic, OpenRouter); when none is configured or a call
    fails, the agent degrades gracefully to a rule-based summary
    instead of failing the request.
    """

    name = "surf_llm_agent"
    description = "AI agent that provides intelligent surf spot recommendations with reasoning"

    # Shared HTTP settings applied to every provider call.
    REQUEST_TIMEOUT = 30.0
    MAX_TOKENS = 800

    def __init__(self):
        """Read API keys for all supported LLM providers from the environment."""
        # Support multiple LLM providers; any subset may be configured.
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
        self.openrouter_api_key = os.getenv("OPENROUTER_API_KEY")

    def _get_available_provider(self) -> Optional[str]:
        """Return the name of the first configured provider, or None.

        Preference order: OpenAI, then Anthropic, then OpenRouter.
        """
        if self.openai_api_key:
            return "openai"
        if self.anthropic_api_key:
            return "anthropic"
        if self.openrouter_api_key:
            return "openrouter"
        return None

    async def _call_openai(self, messages: List[Dict[str, str]]) -> str:
        """Call the OpenAI chat-completions API and return the reply text.

        Raises:
            httpx.HTTPStatusError: On non-2xx responses (raise_for_status).
        """
        async with httpx.AsyncClient() as client:
            response = await client.post(
                "https://api.openai.com/v1/chat/completions",
                headers={
                    "Authorization": f"Bearer {self.openai_api_key}",
                    "Content-Type": "application/json"
                },
                json={
                    "model": "gpt-4o-mini",
                    "messages": messages,
                    "max_tokens": self.MAX_TOKENS,
                    "temperature": 0.7
                },
                timeout=self.REQUEST_TIMEOUT
            )
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"]

    async def _call_anthropic(self, messages: List[Dict[str, str]]) -> str:
        """Call the Anthropic messages API and return the reply text.

        Anthropic takes the system prompt as a top-level ``system`` field,
        so the OpenAI-style message list is split into system and user parts.

        Raises:
            httpx.HTTPStatusError: On non-2xx responses (raise_for_status).
        """
        system_msg = ""
        user_msgs = []
        for msg in messages:
            if msg["role"] == "system":
                system_msg = msg["content"]
            else:
                user_msgs.append(msg)
        async with httpx.AsyncClient() as client:
            response = await client.post(
                "https://api.anthropic.com/v1/messages",
                headers={
                    "x-api-key": self.anthropic_api_key,
                    "Content-Type": "application/json",
                    "anthropic-version": "2023-06-01"
                },
                json={
                    "model": "claude-3-haiku-20240307",
                    "max_tokens": self.MAX_TOKENS,
                    "system": system_msg,
                    "messages": user_msgs
                },
                timeout=self.REQUEST_TIMEOUT
            )
            response.raise_for_status()
            return response.json()["content"][0]["text"]

    async def _call_openrouter(self, messages: List[Dict[str, str]]) -> str:
        """Call the OpenRouter API (OpenAI-compatible, multi-model) and return the reply text.

        Raises:
            httpx.HTTPStatusError: On non-2xx responses (raise_for_status).
        """
        async with httpx.AsyncClient() as client:
            response = await client.post(
                "https://openrouter.ai/api/v1/chat/completions",
                headers={
                    "Authorization": f"Bearer {self.openrouter_api_key}",
                    "Content-Type": "application/json"
                },
                json={
                    "model": "meta-llama/llama-3.1-8b-instruct:free",
                    "messages": messages,
                    "max_tokens": self.MAX_TOKENS,
                    "temperature": 0.7
                },
                timeout=self.REQUEST_TIMEOUT
            )
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"]

    def _build_surf_analysis_prompt(self, input_data: LLMAgentInput) -> List[Dict[str, str]]:
        """Build the system/user message pair for surf spot analysis.

        Each spot dict is expected to carry 'name', 'location', 'score',
        'distance_km', 'characteristics' (with 'break_type' and
        'skill_level'), 'conditions', and 'explanation' keys — a missing
        key raises KeyError, which run() turns into the fallback path.
        """
        # Format spot data for the LLM.
        spots_text = ""
        for i, spot in enumerate(input_data.surf_spots, 1):
            spots_text += f"\n{i}. **{spot['name']}** ({spot['location']})\n"
            spots_text += f"   - Score: {spot['score']}/100\n"
            spots_text += f"   - Distance: {spot['distance_km']}km\n"
            spots_text += f"   - Break Type: {spot['characteristics']['break_type']}\n"
            spots_text += f"   - Skill Levels: {', '.join(spot['characteristics']['skill_level'])}\n"
            spots_text += f"   - Current Conditions: {spot['conditions'].get('wave_height', 'N/A')}m waves, {spot['conditions'].get('wind_speed', 'N/A')}kt wind\n"
            spots_text += f"   - Analysis: {spot['explanation'][:200]}...\n"
        # Format user preferences as pretty JSON for readability in the prompt.
        prefs_text = json.dumps(input_data.user_preferences, indent=2) if input_data.user_preferences else "No specific preferences"
        system_prompt = """You are an expert surf forecaster and local guide AI agent with deep knowledge of surf conditions, wave physics, and surfing culture. Your role is to:
1. **Analyze** surf conditions autonomously using your expertise
2. **Reason** about which spots are best for the user's specific situation
3. **Recommend** spots with clear explanations of your decision-making process
4. **Provide** practical insights about timing, equipment, and local knowledge
Think like a local surf guide who:
- Understands how weather patterns affect waves
- Knows the personality of each surf spot
- Considers safety based on skill level
- Provides actionable advice for the session
Be conversational but expert, enthusiastic but realistic about conditions."""
        user_prompt = f"""Please analyze these surf spots for a surfer in {input_data.user_location} and provide your expert recommendations:
**User Preferences:**
{prefs_text}
**Available Surf Spots:**
{spots_text}
**Your Task:**
As an expert surf agent, please:
1. **Analyze** the current conditions and scores autonomously
2. **Reason** about which spots match the user's needs best
3. **Recommend** the top 2-3 spots with clear explanations
4. **Provide** practical session advice (timing, equipment, safety)
Format your response as:
**🎯 SURF FORECAST ANALYSIS**
[Your autonomous analysis of conditions and patterns]
**πŸ„β€β™‚οΈ TOP RECOMMENDATIONS**
[Your reasoned recommendations with explanations]
**πŸ’‘ SESSION ADVICE**
[Practical tips for the surfing session]
**🌊 LOCAL INSIGHTS**
[Additional context about conditions, crowds, access, etc.]
Be specific about WHY you're recommending each spot based on the data and your surf expertise."""
        return [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]

    def _parse_llm_response(self, response: str) -> Dict[str, str]:
        """Split a structured LLM response into its four named sections.

        Section headers are detected by emoji or keyword; ordinary lines
        are accumulated under the most recent header (lines starting with
        '**' are treated as headers/markup and never accumulated).
        Content before any header lands in 'analysis'.
        """
        sections = {
            "analysis": "",
            "recommendations": "",
            "advice": "",
            "insights": ""
        }
        current_section = "analysis"
        lines = response.split('\n')
        for line in lines:
            line = line.strip()
            if "🎯" in line or "ANALYSIS" in line.upper():
                current_section = "analysis"
            elif "πŸ„β€β™‚οΈ" in line or "RECOMMENDATION" in line.upper():
                current_section = "recommendations"
            elif "πŸ’‘" in line or "ADVICE" in line.upper():
                current_section = "advice"
            elif "🌊" in line or "INSIGHTS" in line.upper():
                current_section = "insights"
            elif line and not line.startswith('**'):
                sections[current_section] += line + " "
        return {k: v.strip() for k, v in sections.items()}

    async def run(self, input_data: LLMAgentInput) -> LLMAgentOutput:
        """Execute the LLM agent reasoning.

        Returns an LLM-backed analysis when a provider is configured and
        reachable; otherwise degrades to the rule-based fallback. Only an
        empty spot list is reported as a hard failure (success=False).
        """
        try:
            # Validate input first so an empty request is reported as an
            # error regardless of whether a provider is configured.
            if not input_data.surf_spots:
                return LLMAgentOutput(
                    success=False,
                    error="No surf spots provided for analysis"
                )

            provider = self._get_available_provider()
            if not provider:
                # Use fallback analysis instead of failing.
                logger.info("No LLM provider available, using fallback analysis")
                return self._fallback_output(
                    input_data,
                    "Analysis completed using rule-based evaluation (no LLM API key configured)"
                )

            # Build the analysis prompt.
            messages = self._build_surf_analysis_prompt(input_data)

            # Dispatch table keeps `response` always bound; an unknown
            # provider raises KeyError and is handled by the fallback below.
            provider_calls = {
                "openai": self._call_openai,
                "anthropic": self._call_anthropic,
                "openrouter": self._call_openrouter,
            }
            logger.info(f"Calling {provider} for surf analysis")
            response = await provider_calls[provider](messages)

            # Parse the response into named sections.
            parsed = self._parse_llm_response(response)

            # Surface only the top spots that the LLM actually mentioned.
            recommendations = []
            if parsed["recommendations"]:
                for spot in input_data.surf_spots[:3]:  # Top 3 spots
                    if spot["name"].lower() in parsed["recommendations"].lower():
                        recommendations.append(f"{spot['name']}: {spot['score']}/100")

            return LLMAgentOutput(
                success=True,
                summary=f"{parsed['analysis'][:200]}..." if parsed['analysis'] else "Analysis completed",
                reasoning=response,  # Full LLM response
                recommendations=recommendations
            )
        except Exception as e:
            logger.error(f"LLM Agent error: {e}")
            # Fallback to rule-based recommendation on any provider/parse error.
            return self._fallback_output(
                input_data,
                "Using fallback analysis due to LLM unavailability"
            )

    def _fallback_output(self, input_data: LLMAgentInput, reasoning: str) -> LLMAgentOutput:
        """Build the successful rule-based result used when no LLM is reachable.

        Args:
            input_data: The original request (spots must be non-empty for
                meaningful recommendations).
            reasoning: Caller-supplied explanation of why the fallback ran.
        """
        return LLMAgentOutput(
            success=True,  # Still successful with fallback
            summary=self._generate_fallback_summary(input_data),
            reasoning=reasoning,
            recommendations=[f"{s['name']}: {s['score']}/100" for s in input_data.surf_spots[:3]]
        )

    def _generate_fallback_summary(self, input_data: LLMAgentInput) -> str:
        """Generate a rule-based summary when the LLM is unavailable.

        Assumes surf_spots is already sorted best-first (the first entry is
        presented as the top recommendation) — confirm against the caller.
        """
        if not input_data.surf_spots:
            return "No surf spots found in your area."
        best_spot = input_data.surf_spots[0]
        total_spots = len(input_data.surf_spots)
        skill_level = input_data.user_preferences.get('skill_level', 'intermediate')
        return f"""Surf analysis for {input_data.user_location}: Found {total_spots} spots within range.
Top recommendation is {best_spot['name']} with a score of {best_spot['score']}/100,
located {best_spot['distance_km']}km away. Current conditions show {best_spot['conditions'].get('wave_height', 'unknown')}m waves.
This spot is suitable for {skill_level} surfers and offers {best_spot['characteristics']['break_type']} waves."""
def create_llm_agent_tool():
    """Build the tool descriptor for the surf LLM agent.

    Returns:
        dict: The tool's name, description, JSON input schema, and the
        async callable that executes it.
    """
    agent = SurfLLMAgent()
    # NOTE(review): BaseModel.schema() is deprecated in pydantic v2 in favour
    # of model_json_schema() — confirm the project's pinned pydantic version.
    input_schema = LLMAgentInput.schema()
    return {
        "name": agent.name,
        "description": agent.description,
        "input_schema": input_schema,
        "function": agent.run,
    }