File size: 7,623 Bytes
7b6b271
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
#!/usr/bin/env python3
"""
Test OpenAI API key functionality
"""

import os
import sys
import asyncio
from typing import Dict, Any

# Load environment variables first
try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    # python-dotenv is optional; fall back to whatever is already in os.environ
    pass

# Add parent directory to path
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))

async def test_openai_api_key():
    """Exercise the SurfLLMAgent end-to-end to verify the OpenAI key is usable.

    Returns True only when OpenAI is the selected provider and the resulting
    reasoning looks like genuine LLM output rather than the fallback path.
    Any exception (including a missing project module) yields False.
    """
    print("πŸ”‘ Testing OpenAI API Key...")

    try:
        from tools.llm_agent_tool import SurfLLMAgent, LLMAgentInput

        # Bail out early when no key is configured at all.
        key = os.getenv("OPENAI_API_KEY")
        if not key:
            print("   ❌ No OPENAI_API_KEY found in environment")
            return False

        print(f"   πŸ” API Key found: {key[:10]}...{key[-4:]}")

        llm_agent = SurfLLMAgent()
        chosen = llm_agent._get_available_provider()
        print(f"   πŸ€– Selected provider: {chosen}")

        if chosen != "openai":
            print(f"   ⚠️  OpenAI not selected as provider (using {chosen})")
            return False

        # Minimal but realistic spot payload for the agent to reason about.
        spots = [{
            "id": "test_spot",
            "name": "Tarifa",
            "location": "Spain, Andalusia",
            "distance_km": 15.2,
            "score": 85.5,
            "characteristics": {
                "break_type": "beach_break",
                "skill_level": ["beginner", "intermediate", "advanced"],
            },
            "conditions": {
                "wave_height": 1.5,
                "wind_speed": 12,
                "wind_direction": 135,
                "swell_direction": 225,
            },
            "explanation": "Excellent conditions with good wave height and favorable wind direction",
        }]

        request = LLMAgentInput(
            user_location="MΓ‘laga, Spain",
            user_preferences={"skill_level": "intermediate", "board_type": "shortboard"},
            surf_spots=spots,
        )

        print("   🧠 Testing LLM reasoning...")
        outcome = await llm_agent.run(request)

        if not outcome.success:
            print(f"   ❌ LLM agent failed: {outcome.error}")
            return False

        print("   βœ… LLM API call successful!")
        print(f"   πŸ“ Summary: {outcome.summary[:100]}...")

        # Fallback responses contain telltale phrases; their presence means the
        # agent did not actually reach the API.
        lowered = outcome.reasoning.lower()
        if "fallback" in lowered or "no llm api key" in lowered:
            print("   ⚠️  Fallback reasoning used (API may not be working)")
            return False

        print("   🎯 Real LLM reasoning detected!")
        print(f"   πŸ“Š Reasoning length: {len(outcome.reasoning)} characters")

        # Show a snippet of the reasoning
        if len(outcome.reasoning) > 200:
            preview = outcome.reasoning[:200] + "..."
        else:
            preview = outcome.reasoning
        print(f"   πŸ’­ Reasoning preview: {preview}")

        return True

    except Exception as e:
        print(f"   πŸ’₯ Exception during API test: {e}")
        return False

async def test_direct_openai_call():
    """Hit the OpenAI chat-completions endpoint directly, bypassing the agent.

    Useful for separating key/network problems from agent-integration issues.
    Returns True on an HTTP 200 with a parseable completion; False on any
    failure, missing key, or exception (including httpx being unavailable).
    """
    print("\nπŸ”§ Testing Direct OpenAI API Call...")

    try:
        import httpx

        key = os.getenv("OPENAI_API_KEY")
        if not key:
            print("   ❌ No API key available")
            return False

        request_headers = {
            "Authorization": f"Bearer {key}",
            "Content-Type": "application/json",
        }
        request_body = {
            "model": "gpt-4o-mini",
            "messages": [
                {"role": "system", "content": "You are a surf expert."},
                {"role": "user", "content": "Give me one sentence about surfing in Tarifa, Spain."},
            ],
            "max_tokens": 100,
        }

        async with httpx.AsyncClient() as client:
            response = await client.post(
                "https://api.openai.com/v1/chat/completions",
                headers=request_headers,
                json=request_body,
                timeout=30.0,
            )

            if response.status_code != 200:
                print(f"   ❌ API call failed: {response.status_code}")
                print(f"   πŸ“„ Response: {response.text}")
                return False

            body = response.json()
            answer = body["choices"][0]["message"]["content"]
            print(f"   βœ… Direct API call successful!")
            print(f"   πŸ“ Response: {answer}")
            return True

    except Exception as e:
        print(f"   πŸ’₯ Direct API call exception: {e}")
        return False

def check_environment():
    """Report on .env presence and OPENAI_API_KEY status.

    Prints diagnostics for the .env file location and the key's presence and
    format, and returns True iff OPENAI_API_KEY is set (format is only warned
    about, never rejected).
    """
    print("🌍 Checking Environment Setup...")

    # The .env file is expected two directories above this file (project root).
    here = os.path.dirname(__file__)
    dotenv_file = os.path.join(os.path.dirname(os.path.dirname(here)), '.env')
    if os.path.exists(dotenv_file):
        print(f"   βœ… .env file found at: {dotenv_file}")
    else:
        print(f"   ⚠️  No .env file found at: {dotenv_file}")

    key = os.getenv("OPENAI_API_KEY")
    if not key:
        print("   ❌ OPENAI_API_KEY not found in environment")
        return False

    print(f"   βœ… OPENAI_API_KEY is set ({len(key)} characters)")
    if key.startswith("sk-"):
        print("   βœ… API key format looks correct (starts with 'sk-')")
    else:
        print("   ⚠️  API key doesn't start with 'sk-' - might be invalid format")
    return True

async def main():
    """Run the environment check and both API tests, then print a summary.

    Returns True only when the environment, the direct API call, and the
    agent integration all succeed. Exits early (False) with setup guidance
    when no API key is configured.
    """
    print("πŸš€ OpenAI API Key Testing")
    print("=" * 50)

    env_ready = check_environment()

    if not env_ready:
        print("\n❌ Environment not properly configured")
        print("πŸ’‘ To enable OpenAI API:")
        print("   1. Create a .env file in project root")
        print("   2. Add: OPENAI_API_KEY=sk-your-key-here")
        print("   3. Get API key from: https://platform.openai.com/api-keys")
        return False

    # Run the isolated API probe first, then the full agent integration.
    direct_ready = await test_direct_openai_call()
    agent_ready = await test_openai_api_key()

    print("\n" + "=" * 50)
    print("πŸ“Š API Test Results:")
    for label, ok in (
        ("Environment Setup", env_ready),
        ("Direct API Call", direct_ready),
        ("LLM Agent Integration", agent_ready),
    ):
        print(f"   {label}: {'βœ…' if ok else '❌'}")

    if env_ready and direct_ready and agent_ready:
        print("\nπŸŽ‰ OpenAI API is working perfectly!")
        print("πŸ€– Your surf agent will use advanced LLM reasoning")
    elif env_ready and direct_ready:
        print("\nπŸ”§ OpenAI API works, but there's an integration issue")
    elif env_ready:
        print("\n⚠️  API key found but not working - check key validity")
    else:
        print("\n❌ OpenAI API not properly configured")
        print("πŸ”„ The app will use intelligent fallback reasoning instead")

    return env_ready and direct_ready and agent_ready

if __name__ == "__main__":
    # Script entry point: run all checks; results are reported via stdout
    # (the boolean return of main() is not propagated as an exit code).
    asyncio.run(main())