# Scraped page metadata (not code) — commit 7b6b271, "Production ready app version", author D3MI4N
#!/usr/bin/env python3
"""
Test OpenAI API key functionality
"""
import os
import sys
import asyncio
from typing import Dict, Any

# Pull variables from a .env file when python-dotenv is installed;
# silently continue without it otherwise (the key may already be exported).
try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    pass

# Make the package root importable so `tools.*` resolves when run as a script.
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
async def test_openai_api_key():
    """Run the SurfLLMAgent against sample surf data to verify OpenAI integration."""
    print("🔑 Testing OpenAI API Key...")
    try:
        from tools.llm_agent_tool import SurfLLMAgent, LLMAgentInput

        # Bail out immediately when no key is configured.
        key = os.getenv("OPENAI_API_KEY")
        if not key:
            print(" ❌ No OPENAI_API_KEY found in environment")
            return False
        print(f" 🔍 API Key found: {key[:10]}...{key[-4:]}")

        # The agent must actually choose OpenAI as its backend for this test.
        agent = SurfLLMAgent()
        provider = agent._get_available_provider()
        print(f" 🤖 Selected provider: {provider}")
        if provider != "openai":
            print(f" ⚠️ OpenAI not selected as provider (using {provider})")
            return False

        # Minimal fixture: one well-formed spot so the agent has data to reason about.
        demo_spots = [
            {
                "id": "test_spot",
                "name": "Tarifa",
                "location": "Spain, Andalusia",
                "distance_km": 15.2,
                "score": 85.5,
                "characteristics": {
                    "break_type": "beach_break",
                    "skill_level": ["beginner", "intermediate", "advanced"],
                },
                "conditions": {
                    "wave_height": 1.5,
                    "wind_speed": 12,
                    "wind_direction": 135,
                    "swell_direction": 225,
                },
                "explanation": "Excellent conditions with good wave height and favorable wind direction",
            }
        ]

        payload = LLMAgentInput(
            user_location="Málaga, Spain",
            user_preferences={"skill_level": "intermediate", "board_type": "shortboard"},
            surf_spots=demo_spots,
        )

        print(" 🧠 Testing LLM reasoning...")
        result = await agent.run(payload)

        if not result.success:
            print(f" ❌ LLM agent failed: {result.error}")
            return False

        print(" ✅ LLM API call successful!")
        print(f" 📝 Summary: {result.summary[:100]}...")

        # Distinguish genuine LLM output from the agent's built-in fallback text.
        lowered = result.reasoning.lower()
        if "fallback" in lowered or "no llm api key" in lowered:
            print(" ⚠️ Fallback reasoning used (API may not be working)")
            return False

        print(" 🎯 Real LLM reasoning detected!")
        print(f" 📊 Reasoning length: {len(result.reasoning)} characters")
        if len(result.reasoning) > 200:
            preview = result.reasoning[:200] + "..."
        else:
            preview = result.reasoning
        print(f" 💭 Reasoning preview: {preview}")
        return True

    except Exception as e:
        # Broad catch is intentional: this is a diagnostic script that must
        # report failure rather than crash (covers ImportError, network, etc.).
        print(f" 💥 Exception during API test: {e}")
        return False
async def test_direct_openai_call():
    """Hit the OpenAI chat-completions endpoint directly, bypassing the agent layer."""
    print("\n🔧 Testing Direct OpenAI API Call...")
    try:
        import httpx

        key = os.getenv("OPENAI_API_KEY")
        if not key:
            print(" ❌ No API key available")
            return False

        # Build the request pieces up front so the HTTP call reads cleanly.
        request_headers = {
            "Authorization": f"Bearer {key}",
            "Content-Type": "application/json",
        }
        request_body = {
            "model": "gpt-4o-mini",
            "messages": [
                {"role": "system", "content": "You are a surf expert."},
                {"role": "user", "content": "Give me one sentence about surfing in Tarifa, Spain."},
            ],
            "max_tokens": 100,
        }

        async with httpx.AsyncClient() as http:
            response = await http.post(
                "https://api.openai.com/v1/chat/completions",
                headers=request_headers,
                json=request_body,
                timeout=30.0,
            )

        if response.status_code != 200:
            print(f" ❌ API call failed: {response.status_code}")
            print(f" 📄 Response: {response.text}")
            return False

        body = response.json()
        content = body["choices"][0]["message"]["content"]
        print(f" ✅ Direct API call successful!")
        print(f" 📝 Response: {content}")
        return True

    except Exception as e:
        # Covers ImportError (httpx missing), timeouts, and transport errors.
        print(f" 💥 Direct API call exception: {e}")
        return False
def check_environment():
    """Report on .env file presence and OPENAI_API_KEY configuration; return True iff the key is set."""
    print("🌍 Checking Environment Setup...")

    # Expected .env location: three directory levels up from this file
    # (presumably the project root — verify against the repo layout).
    project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    env_path = os.path.join(project_root, '.env')
    if os.path.exists(env_path):
        print(f" ✅ .env file found at: {env_path}")
    else:
        print(f" ⚠️ No .env file found at: {env_path}")

    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        print(" ❌ OPENAI_API_KEY not found in environment")
        return False

    print(f" ✅ OPENAI_API_KEY is set ({len(api_key)} characters)")
    # Sanity-check the key shape only; validity is proven by the API calls.
    if api_key.startswith("sk-"):
        print(" ✅ API key format looks correct (starts with 'sk-')")
    else:
        print(" ⚠️ API key doesn't start with 'sk-' - might be invalid format")
    return True
async def main():
    """Run all API tests and print a summary; return True iff everything passed."""
    print("🚀 OpenAI API Key Testing")
    print("=" * 50)

    env_ok = check_environment()
    if not env_ok:
        # No key configured — explain how to set one up and stop early.
        print("\n❌ Environment not properly configured")
        print("💡 To enable OpenAI API:")
        print(" 1. Create a .env file in project root")
        print(" 2. Add: OPENAI_API_KEY=sk-your-key-here")
        print(" 3. Get API key from: https://platform.openai.com/api-keys")
        return False

    # Raw HTTP call first (isolates key problems), then the agent integration.
    direct_ok = await test_direct_openai_call()
    agent_ok = await test_openai_api_key()

    print("\n" + "=" * 50)
    print("📊 API Test Results:")
    for label, ok in (
        ("Environment Setup", env_ok),
        ("Direct API Call", direct_ok),
        ("LLM Agent Integration", agent_ok),
    ):
        print(f" {label}: {'✅' if ok else '❌'}")

    if env_ok and direct_ok and agent_ok:
        print("\n🎉 OpenAI API is working perfectly!")
        print("🤖 Your surf agent will use advanced LLM reasoning")
    elif env_ok and direct_ok:
        print("\n🔧 OpenAI API works, but there's an integration issue")
    elif env_ok:
        print("\n⚠️ API key found but not working - check key validity")
    else:
        # NOTE(review): unreachable — env_ok False already returned above.
        print("\n❌ OpenAI API not properly configured")
        print("🔄 The app will use intelligent fallback reasoning instead")

    return env_ok and direct_ok and agent_ok


if __name__ == "__main__":
    asyncio.run(main())