#!/usr/bin/env python3
"""
Modal Integration Tests for Surf Spot Finder MCP Server

This module provides comprehensive testing for Modal serverless deployment
integration, including direct endpoint tests, MCP client integration,
local fallback behavior, and Gradio UI compatibility.

Tests included:
- Direct Modal endpoint health checks and API calls
- MCP client integration with Modal backend
- Local fallback when Modal is unavailable
- End-to-end Gradio UI integration

Usage:
    python -m pytest mcp_server/tests/test_modal_integration.py
    # OR
    python mcp_server/tests/test_modal_integration.py
"""

import os
import sys
import json
from typing import Dict, Any, Tuple, Optional

# Add project paths for imports
project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, os.path.join(project_root, 'hf_space'))
sys.path.insert(0, project_root)
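
# The mcp_client and app imports used further below are assumed to resolve
# from these paths (i.e. the modules live in hf_space/ or at the project
# root); adjust the path setup above if the repository layout differs.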


def test_modal_direct_endpoints() -> bool:
    """
    Test Modal endpoints directly via HTTP requests.

    This function performs direct HTTP calls to the deployed Modal endpoints
    to verify they are accessible and returning expected responses.

    Tests performed:
    1. Health check endpoint GET request
    2. Surf spots API POST request with sample payload

    Returns:
        bool: True if all endpoint tests pass, False otherwise

    Note:
        Network and response-parsing errors (requests.RequestException,
        json.JSONDecodeError) are caught and reported as a failed test
        rather than raised.
    """
    print("Testing Modal Endpoints Directly")
    print("=" * 50)

    import requests

    base_url = "https://mcp-model-labs--surf-spot-finder-mcp"
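    # The endpoint URLs below follow what appears to be Modal's default web
    # endpoint pattern, https://<workspace>--<app-name>-<function-name>.modal.run,
    # built by appending a function-specific suffix to base_url.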

    # Test 1: Health check
    print("Testing health check...")
    try:
        response = requests.get(f"{base_url}-health-check.modal.run", timeout=10)
        if response.status_code == 200:
            data = response.json()
            print(f" Health: {data['status']}")
            print(f" Message: {data['message']}")
        else:
            print(f" Health check failed: {response.status_code}")
            return False
    except Exception as e:
        print(f" Health check error: {e}")
        return False

    # Test 2: Surf spots API
    print("\nTesting surf spots API...")
    try:
        payload = {
            "location": "Málaga, Spain",
            "max_distance": 50,
            "num_spots": 3,
            "preferences": {
                "skill_level": "intermediate",
                "board_type": "shortboard"
            }
        }
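
        # Expected response shape (inferred from the checks below):
        #   {"ok": bool, "spots": [{"name": str, "score": int, ...}, ...],
        #    "ai_reasoning": str, "error": str (when ok is false)}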
        response = requests.post(
            f"{base_url}-api-find-spots.modal.run",
            json=payload,
            headers={"Content-Type": "application/json"},
            timeout=30
        )

        if response.status_code == 200:
            data = response.json()
            print(f" API Success: {data['ok']}")

            if data['ok']:
                spots = data.get('spots', [])
                print(f" Found {len(spots)} spots")
                for i, spot in enumerate(spots, 1):
                    print(f" {i}. {spot['name']}: {spot['score']}/100")

                # Check for AI reasoning
                reasoning = data.get('ai_reasoning', '')
                if reasoning:
                    print(f" AI reasoning: {len(reasoning)} characters")
                    print(f" Preview: {reasoning[:100]}...")

                return True
            else:
                print(f" API returned error: {data.get('error')}")
                return False
        else:
            print(f" HTTP error: {response.status_code}")
            print(f" Response: {response.text[:200]}...")
            return False

    except Exception as e:
        print(f" API test error: {e}")
        return False


def test_modal_via_client() -> bool:
    """
    Test Modal integration via the MCP client abstraction.

    This function tests the MCP client's ability to communicate with
    the Modal backend, verifying that the client correctly routes
    requests through Modal when MODAL_URL is configured.

    Tests performed:
    1. Configure the MODAL_URL environment variable
    2. Import and use the MCP client find_best_spots function
    3. Verify response format and content quality
    4. Check AI reasoning and summary generation

    Returns:
        bool: True if client integration works, False otherwise

    Environment Variables:
        MODAL_URL: Set to the Modal API endpoint for testing

    Note:
        Import or execution errors are caught, printed with a traceback,
        and reported as a failed test rather than raised.
    """
    print("\nTesting Modal via MCP Client")
    print("=" * 40)

    # Set Modal URL environment variable
    os.environ['MODAL_URL'] = 'https://mcp-model-labs--surf-spot-finder-mcp-api-find-spots.modal.run'
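
    # Unlike the raw HTTP endpoint above, the client result is expected to use
    # 'results' and 'ai_summary' keys (in addition to 'ai_reasoning'), as
    # reflected in the checks below.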

    try:
        from mcp_client import find_best_spots

        print("Testing client with Modal backend...")
        result = find_best_spots(
            user_location="Tarifa, Spain",
            max_distance_km=75,
            top_n=2,
            prefs={
                "skill_level": "advanced",
                "board_type": "shortboard"
            }
        )

        print(f" Client success: {result['ok']}")

        if result['ok']:
            spots = result.get('results', [])
            print(f" Found {len(spots)} spots via client")

            for spot in spots:
                print(f" • {spot.get('name', 'Unknown')}: {spot.get('score', 0)}/100")

            # Test AI summary
            summary = result.get('ai_summary', '')
            if summary:
                print(f" AI Summary: {summary[:100]}...")

            # Test AI reasoning
            reasoning = result.get('ai_reasoning', '')
            if reasoning and len(reasoning) > 100:
                print(f" AI Reasoning: {len(reasoning)} characters")

            return True
        else:
            print(f" Client error: {result.get('error')}")
            return False

    except Exception as e:
        print(f" Client test error: {e}")
        import traceback
        print(f" Traceback: {traceback.format_exc()}")
        return False


def test_local_fallback() -> bool:
    """
    Test local fallback functionality when Modal is unavailable.

    This function verifies that the system gracefully falls back to
    local processing when the Modal deployment is not configured or
    accessible, ensuring system reliability.

    Tests performed:
    1. Remove the MODAL_URL environment variable
    2. Attempt surf spot finding with the local backend
    3. Verify local processing works correctly
    4. Check response format consistency

    Returns:
        bool: True if local fallback works, False otherwise

    Note:
        This test removes MODAL_URL and does not restore it itself;
        test_gradio_integration sets it again for the subsequent test.
    """
    print("\nTesting Local Fallback")
    print("=" * 30)

    # Remove Modal URL to test local mode
    if 'MODAL_URL' in os.environ:
        del os.environ['MODAL_URL']
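
    # Removing the variable only affects this process's environment; the MCP
    # client is expected to fall back to its local backend when MODAL_URL is
    # unset, which is what the call below exercises.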

    try:
        from mcp_client import find_best_spots

        print("Testing local fallback mode...")
        result = find_best_spots(
            user_location="Málaga, Spain",
            max_distance_km=50,
            top_n=2,
            prefs={"skill_level": "beginner"}
        )

        print(f" Local fallback: {result['ok']}")

        if result['ok']:
            spots = result.get('results', [])
            print(f" Local spots found: {len(spots)}")

            for spot in spots:
                name = spot.get('name', 'Unknown')
                score = spot.get('score', 0)
                print(f" • {name}: {score}/100")

            return True
        else:
            print(f" Local fallback failed: {result.get('error')}")
            return False

    except Exception as e:
        print(f" Local fallback error: {e}")
        return False


def test_gradio_integration() -> bool:
    """
    Test end-to-end Gradio application integration with Modal.

    This function tests the actual Gradio UI function that users
    interact with, ensuring it properly integrates with the Modal
    backend and returns formatted HTML responses.

    Tests performed:
    1. Configure the Modal URL for testing
    2. Import and execute the Gradio surf finder function
    3. Verify HTML output generation
    4. Check for Modal-specific content in responses

    Returns:
        bool: True if Gradio integration works, False otherwise

    Note:
        This test directly calls the Gradio app functions and
        verifies the UI layer integration.
    """
    print("\nTesting Gradio App Integration")
    print("=" * 40)

    # Set Modal URL back
    os.environ['MODAL_URL'] = 'https://mcp-model-labs--surf-spot-finder-mcp-api-find-spots.modal.run'

    try:
        sys.path.append('hf_space')
        from app import run_surf_finder

        print("Testing Gradio surf finder function...")

        # Test the actual Gradio function
        results = run_surf_finder(
            location="Málaga, Spain",
            max_distance=50,
            num_spots=3,
            skill_level="Intermediate",
            board_type="Shortboard"
        )

        # results should be (spots_html, ai_reasoning_html, accordion_update)
        if len(results) >= 2:
            spots_html = results[0]
            reasoning_html = results[1]

            print(" Gradio function executed successfully")

            # Check if we got HTML content
            if spots_html and len(spots_html) > 100:
                print(f" Spots HTML: {len(spots_html)} characters")

            if reasoning_html and len(reasoning_html) > 100:
                print(f" Reasoning HTML: {len(reasoning_html)} characters")

                # Check for Modal-specific content
                if "Modal Deployment" in reasoning_html:
                    print(" Modal deployment content detected!")

            return True
        else:
            print(f" Unexpected Gradio result format: {results}")
            return False

    except Exception as e:
        print(f" Gradio integration error: {e}")
        import traceback
        print(f" Traceback: {traceback.format_exc()}")
        return False


def main() -> bool:
    """
    Execute the complete Modal integration test suite.

    This function runs all Modal integration tests in sequence and
    provides a comprehensive report of the results. It's designed to
    verify that the entire Modal deployment pipeline is working correctly.

    Test Suite Includes:
    1. Direct Modal endpoint connectivity tests
    2. MCP client integration with Modal backend
    3. Local fallback behavior verification
    4. End-to-end Gradio UI integration testing

    Returns:
        bool: True if all tests pass, False if any test fails

    Exit Codes (when run as a script):
        0: All tests passed successfully
        1: One or more tests failed

    Example:
        >>> success = main()
        >>> print(f"Tests {'passed' if success else 'failed'}")
    """
    print("Surf Spot Finder - Modal Integration Tests")
    print("=" * 60)

    results = {}

    # Test 1: Direct Modal endpoints
    results['modal_direct'] = test_modal_direct_endpoints()

    # Test 2: Modal via MCP client
    results['modal_client'] = test_modal_via_client()

    # Test 3: Local fallback
    results['local_fallback'] = test_local_fallback()

    # Test 4: Gradio integration
    results['gradio_integration'] = test_gradio_integration()

    # Summary
    print("\n" + "=" * 60)
    print("MODAL INTEGRATION TEST RESULTS:")
    print("=" * 60)

    for test_name, success in results.items():
        status = "PASS" if success else "FAIL"
        print(f" {test_name.replace('_', ' ').title()}: {status}")

    passed = sum(results.values())
    total = len(results)
    print(f"\nResults: {passed}/{total} tests passed")

    if passed == total:
        print("\nALL TESTS PASSED!")
        print("Your Modal deployment is ready for hackathon submission!")
        print("\nModal Dashboard: https://modal.com/apps/mcp-model-labs")
        print("Health Check: https://mcp-model-labs--surf-spot-finder-mcp-health-check.modal.run")
    else:
        print(f"\n{total - passed} test(s) failed - check configuration")
        if not results.get('modal_direct', False):
            print("Tip: Modal endpoints might be cold-starting. Try again in 30s.")

    print("=" * 60)
    return passed == total


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)