File size: 14,633 Bytes
7b6b271
23a9367
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7b6b271
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23a9367
 
 
 
 
 
 
 
7b6b271
 
 
 
 
 
 
23a9367
 
 
 
 
 
 
 
 
7b6b271
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
"""
LLM Agent Tool for Intelligent Surf Spot Analysis.

This module provides AI-powered reasoning and natural language generation
for surf spot recommendations. It demonstrates autonomous agent behavior
by analyzing surf conditions and generating human-like explanations.

The agent supports multiple LLM providers with intelligent fallbacks:
    1. OpenAI GPT-4 (primary)
    2. Anthropic Claude (secondary) 
    3. OpenRouter (multi-model access)
    4. Rule-based reasoning (always available)

Key capabilities:
    - Autonomous analysis of surf conditions
    - Natural language explanation generation
    - Safety-focused recommendations based on skill level
    - Multi-factor reasoning about wave, wind, and swell
    - Contextual advice for different surf scenarios

Example:
    >>> agent = SurfLLMAgent()
    >>> input_data = LLMAgentInput(
    ...     user_location="Tarifa, Spain",
    ...     user_preferences={"skill_level": "beginner"},
    ...     surf_spots=evaluated_spots
    ... )
    >>> result = await agent.run(input_data)
    >>> print(result.reasoning)  # AI-generated explanation

Author: Surf Spot Finder Team
License: MIT
"""

import os
import json
from typing import Dict, Any, List, Optional
from pydantic import BaseModel, Field
import httpx
import logging

# Load environment variables
try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    pass

logger = logging.getLogger(__name__)


class LLMAgentInput(BaseModel):
    """Input schema for the LLM agent tool.

    Validated by pydantic before `SurfLLMAgent.run` is invoked.

    Attributes:
        user_location: User's location for contextual recommendations.
        user_preferences: Dict with skill_level, board_type, etc.
        surf_spots: List of evaluated spots with scores and conditions.
            Each dict is expected to carry 'name', 'score', 'distance_km',
            'conditions', 'characteristics', and 'explanation' keys
            (see _build_surf_analysis_prompt) — produced by an upstream
            evaluator not visible in this module.
        reasoning_task: Type of analysis (default: "recommendation").
    """
    user_location: str = Field(description="User's location")
    user_preferences: Dict[str, Any] = Field(description="User surfing preferences")
    surf_spots: List[Dict[str, Any]] = Field(description="Evaluated surf spots with scores")
    reasoning_task: str = Field(default="recommendation", description="Type of reasoning task")


class LLMAgentOutput(BaseModel):
    """Output schema for LLM agent results.

    Attributes:
        success: Whether AI analysis completed successfully.
        summary: Brief recommendation summary (1-2 sentences).
        reasoning: Detailed AI analysis with explanations.
        recommendations: List of specific actionable advice.
        error: Error message if analysis failed.
    """
    success: bool
    summary: str = ""
    reasoning: str = ""
    # default_factory avoids the shared-mutable-default pitfall; pydantic
    # copies a bare `[]` default too, but this form is explicit and is the
    # documented way to declare mutable defaults.
    recommendations: List[str] = Field(default_factory=list)
    error: str = ""


class SurfLLMAgent:
    """LLM-powered agent for surf spot recommendations and reasoning.

    Demonstrates autonomous decision making and natural language
    understanding. Providers are tried in a fixed priority order
    (OpenAI, Anthropic, OpenRouter) based on which API key is present in
    the environment; when no provider is configured or a provider call
    fails, the agent degrades gracefully to a rule-based summary instead
    of raising.
    """

    name = "surf_llm_agent"
    description = "AI agent that provides intelligent surf spot recommendations with reasoning"

    def __init__(self):
        # Support multiple LLM providers; any subset of these keys may be
        # set. Keys are read once at construction time.
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
        self.openrouter_api_key = os.getenv("OPENROUTER_API_KEY")

    def _get_available_provider(self) -> Optional[str]:
        """Return the first configured provider name, or None.

        Priority: "openai" > "anthropic" > "openrouter".
        """
        if self.openai_api_key:
            return "openai"
        if self.anthropic_api_key:
            return "anthropic"
        if self.openrouter_api_key:
            return "openrouter"
        return None

    async def _call_openai(self, messages: List[Dict[str, str]]) -> str:
        """Call the OpenAI chat completions API and return the reply text.

        Raises:
            httpx.HTTPStatusError: On a non-2xx response.
            httpx.TimeoutException: If the request exceeds 30 seconds.
        """
        async with httpx.AsyncClient() as client:
            response = await client.post(
                "https://api.openai.com/v1/chat/completions",
                headers={
                    "Authorization": f"Bearer {self.openai_api_key}",
                    "Content-Type": "application/json"
                },
                json={
                    "model": "gpt-4o-mini",
                    "messages": messages,
                    "max_tokens": 800,
                    "temperature": 0.7
                },
                timeout=30.0
            )
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"]

    async def _call_anthropic(self, messages: List[Dict[str, str]]) -> str:
        """Call the Anthropic Messages API and return the reply text.

        Anthropic takes the system prompt as a top-level `system` field
        rather than a message with role "system", so the OpenAI-style
        message list is split accordingly first.

        Raises:
            httpx.HTTPStatusError: On a non-2xx response.
            httpx.TimeoutException: If the request exceeds 30 seconds.
        """
        # Convert messages to Anthropic format.
        system_msg = ""
        user_msgs = []

        for msg in messages:
            if msg["role"] == "system":
                system_msg = msg["content"]
            else:
                user_msgs.append(msg)

        async with httpx.AsyncClient() as client:
            response = await client.post(
                "https://api.anthropic.com/v1/messages",
                headers={
                    "x-api-key": self.anthropic_api_key,
                    "Content-Type": "application/json",
                    "anthropic-version": "2023-06-01"
                },
                json={
                    "model": "claude-3-haiku-20240307",
                    "max_tokens": 800,
                    "system": system_msg,
                    "messages": user_msgs
                },
                timeout=30.0
            )
            response.raise_for_status()
            return response.json()["content"][0]["text"]

    async def _call_openrouter(self, messages: List[Dict[str, str]]) -> str:
        """Call the OpenRouter API (OpenAI-compatible, multiple models).

        Uses a free-tier Llama model; response shape matches OpenAI's.

        Raises:
            httpx.HTTPStatusError: On a non-2xx response.
            httpx.TimeoutException: If the request exceeds 30 seconds.
        """
        async with httpx.AsyncClient() as client:
            response = await client.post(
                "https://openrouter.ai/api/v1/chat/completions",
                headers={
                    "Authorization": f"Bearer {self.openrouter_api_key}",
                    "Content-Type": "application/json"
                },
                json={
                    "model": "meta-llama/llama-3.1-8b-instruct:free",
                    "messages": messages,
                    "max_tokens": 800,
                    "temperature": 0.7
                },
                timeout=30.0
            )
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"]

    def _build_surf_analysis_prompt(self, input_data: LLMAgentInput) -> List[Dict[str, str]]:
        """Build the system/user message pair for surf spot analysis.

        The user message embeds each spot's score, distance, break type,
        skill levels, current conditions, and a truncated explanation,
        plus the user's preferences as pretty-printed JSON. The requested
        response format uses emoji section headers that
        _parse_llm_response keys on.
        """
        # Format spot data for the LLM.
        spots_text = ""
        for i, spot in enumerate(input_data.surf_spots, 1):
            spots_text += f"\n{i}. **{spot['name']}** ({spot['location']})\n"
            spots_text += f"   - Score: {spot['score']}/100\n"
            spots_text += f"   - Distance: {spot['distance_km']}km\n"
            spots_text += f"   - Break Type: {spot['characteristics']['break_type']}\n"
            spots_text += f"   - Skill Levels: {', '.join(spot['characteristics']['skill_level'])}\n"
            spots_text += f"   - Current Conditions: {spot['conditions'].get('wave_height', 'N/A')}m waves, {spot['conditions'].get('wind_speed', 'N/A')}kt wind\n"
            spots_text += f"   - Analysis: {spot['explanation'][:200]}...\n"

        # Format user preferences.
        prefs_text = json.dumps(input_data.user_preferences, indent=2) if input_data.user_preferences else "No specific preferences"

        system_prompt = """You are an expert surf forecaster and local guide AI agent with deep knowledge of surf conditions, wave physics, and surfing culture. Your role is to:

1. **Analyze** surf conditions autonomously using your expertise
2. **Reason** about which spots are best for the user's specific situation  
3. **Recommend** spots with clear explanations of your decision-making process
4. **Provide** practical insights about timing, equipment, and local knowledge

Think like a local surf guide who:
- Understands how weather patterns affect waves
- Knows the personality of each surf spot
- Considers safety based on skill level  
- Provides actionable advice for the session

Be conversational but expert, enthusiastic but realistic about conditions."""

        # NOTE: the emoji headers below were previously mojibake
        # ("πŸ„β€β™‚οΈ", "πŸ’‘" — UTF-8 bytes decoded as cp1252); fixed here
        # and in _parse_llm_response together so prompt and parser agree.
        user_prompt = f"""Please analyze these surf spots for a surfer in {input_data.user_location} and provide your expert recommendations:

**User Preferences:**
{prefs_text}

**Available Surf Spots:**
{spots_text}

**Your Task:**
As an expert surf agent, please:

1. **Analyze** the current conditions and scores autonomously
2. **Reason** about which spots match the user's needs best
3. **Recommend** the top 2-3 spots with clear explanations
4. **Provide** practical session advice (timing, equipment, safety)

Format your response as:

**🎯 SURF FORECAST ANALYSIS**
[Your autonomous analysis of conditions and patterns]

**🏄‍♂️ TOP RECOMMENDATIONS**  
[Your reasoned recommendations with explanations]

**💡 SESSION ADVICE**
[Practical tips for the surfing session]

**🌊 LOCAL INSIGHTS**
[Additional context about conditions, crowds, access, etc.]

Be specific about WHY you're recommending each spot based on the data and your surf expertise."""

        return [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]

    def _parse_llm_response(self, response: str) -> Dict[str, str]:
        """Parse a structured LLM response into named sections.

        Lines are bucketed into the current section; a section switches
        when a line carries the section's emoji or keyword (keyword
        matching is the fallback in case the model drops the emoji).
        Header lines themselves (starting with '**') are not accumulated.

        Returns:
            Dict with keys "analysis", "recommendations", "advice",
            "insights"; text before any header lands in "analysis".
        """
        sections = {
            "analysis": "",
            "recommendations": "",
            "advice": "",
            "insights": ""
        }

        current_section = "analysis"
        lines = response.split('\n')

        for line in lines:
            line = line.strip()

            if "🎯" in line or "ANALYSIS" in line.upper():
                current_section = "analysis"
            elif "🏄‍♂️" in line or "RECOMMENDATION" in line.upper():
                current_section = "recommendations"
            elif "💡" in line or "ADVICE" in line.upper():
                current_section = "advice"
            elif "🌊" in line or "INSIGHTS" in line.upper():
                current_section = "insights"
            elif line and not line.startswith('**'):
                sections[current_section] += line + " "

        return {k: v.strip() for k, v in sections.items()}

    async def run(self, input_data: LLMAgentInput) -> LLMAgentOutput:
        """Execute the LLM agent reasoning.

        Never raises: any provider/network failure is caught and answered
        with the rule-based fallback (success=True, since a usable answer
        is still produced). Only an empty spot list yields success=False.
        """
        try:
            # Validate input first — previously this check sat behind the
            # provider check, so it was unreachable when no API key was set.
            if not input_data.surf_spots:
                return LLMAgentOutput(
                    success=False,
                    error="No surf spots provided for analysis"
                )

            provider = self._get_available_provider()

            if not provider:
                # Use fallback analysis instead of failing.
                logger.info("No LLM provider available, using fallback analysis")
                fallback_summary = self._generate_fallback_summary(input_data)

                return LLMAgentOutput(
                    success=True,
                    summary=fallback_summary,
                    reasoning="Analysis completed using rule-based evaluation (no LLM API key configured)",
                    recommendations=[f"{s.get('name', 'unknown')}: {s.get('score', 'N/A')}/100" for s in input_data.surf_spots[:3]]
                )

            # Build the analysis prompt.
            messages = self._build_surf_analysis_prompt(input_data)

            # Call the appropriate LLM. Dict dispatch guarantees a KeyError
            # (caught below) for an unknown provider instead of silently
            # leaving `response` unbound.
            logger.info(f"Calling {provider} for surf analysis")
            provider_calls = {
                "openai": self._call_openai,
                "anthropic": self._call_anthropic,
                "openrouter": self._call_openrouter,
            }
            response = await provider_calls[provider](messages)

            # Parse the response into sections.
            parsed = self._parse_llm_response(response)

            # Extract recommendations as a list: simple match of spot names
            # mentioned anywhere in the recommendations section.
            recommendations = []
            if parsed["recommendations"]:
                for spot in input_data.surf_spots[:3]:  # Top 3 spots
                    if spot["name"].lower() in parsed["recommendations"].lower():
                        recommendations.append(f"{spot['name']}: {spot['score']}/100")

            return LLMAgentOutput(
                success=True,
                summary=f"{parsed['analysis'][:200]}..." if parsed['analysis'] else "Analysis completed",
                reasoning=response,  # Full LLM response
                recommendations=recommendations
            )

        except Exception as e:
            logger.error(f"LLM Agent error: {e}")

            # Fallback to rule-based recommendation.
            fallback_summary = self._generate_fallback_summary(input_data)

            return LLMAgentOutput(
                success=True,  # Still successful with fallback
                summary=fallback_summary,
                reasoning="Using fallback analysis due to LLM unavailability",
                recommendations=[f"{s.get('name', 'unknown')}: {s.get('score', 'N/A')}/100" for s in input_data.surf_spots[:3]]
            )

    def _generate_fallback_summary(self, input_data: LLMAgentInput) -> str:
        """Generate a rule-based summary when the LLM is unavailable.

        Assumes surf_spots is sorted best-first (the top spot is taken as
        input_data.surf_spots[0]) — TODO confirm against the upstream
        evaluator. Uses .get() throughout because this also runs inside
        run()'s except handler, where a KeyError must not escape.
        """
        if not input_data.surf_spots:
            return "No surf spots found in your area."

        best_spot = input_data.surf_spots[0]
        total_spots = len(input_data.surf_spots)

        skill_level = input_data.user_preferences.get('skill_level', 'intermediate')
        conditions = best_spot.get('conditions', {})
        characteristics = best_spot.get('characteristics', {})

        # Built from adjacent literals so no source indentation leaks into
        # the output (the old triple-quoted f-string embedded runs of
        # spaces from the file's indentation).
        return (
            f"Surf analysis for {input_data.user_location}: Found {total_spots} spots within range. "
            f"Top recommendation is {best_spot.get('name', 'unknown')} with a score of {best_spot.get('score', 'N/A')}/100, "
            f"located {best_spot.get('distance_km', 'unknown')}km away. Current conditions show {conditions.get('wave_height', 'unknown')}m waves. "
            f"This spot is suitable for {skill_level} surfers and offers {characteristics.get('break_type', 'unknown')} waves."
        )


def create_llm_agent_tool():
    """Factory function to create the LLM agent tool.

    Returns:
        A tool-descriptor dict with the agent's name, description, the
        JSON schema of its input model, and the async ``run`` coroutine
        to invoke as ``function``.
    """
    agent = SurfLLMAgent()
    descriptor = {
        "name": agent.name,
        "description": agent.description,
        "input_schema": LLMAgentInput.schema(),
        "function": agent.run,
    }
    return descriptor