Spaces:
Running
Running
Upload folder using huggingface_hub
Browse files- .gitattributes +1 -0
- .hfignore +20 -0
- Dockerfile +104 -0
- README.md +89 -4
- api_app.py +78 -0
- check_secrets.py +35 -0
- initialize.py +166 -0
- knowledge/custom/TED Podcasts.pdf +3 -0
- preload.py +58 -0
- python/api/csrf_token.py +23 -0
- python/extensions/system_prompt/_10_system_prompt.py +43 -0
- python/helpers/csrf.py +28 -0
- python/helpers/mcp_server.py +433 -0
- python/helpers/searxng.py +36 -0
- python/helpers/settings.py +1602 -0
- python/tools/search_engine.py +81 -0
- requirements.txt +46 -0
- run_ui.py +306 -0
- searxng/settings.yml +0 -0
- start.sh +10 -0
- webui/index.html +0 -0
- webui/js/api.js +90 -0
- webui/js/index.js +1275 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
knowledge/custom/TED[[:space:]]Podcasts.pdf filter=lfs diff=lfs merge=lfs -text
|
.hfignore
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
agent-zero-temp/
|
| 2 |
+
anthropic-skills-temp/
|
| 3 |
+
agent-zero-temp-2/
|
| 4 |
+
verification_output.txt
|
| 5 |
+
verify_integration.py
|
| 6 |
+
test_api.py
|
| 7 |
+
.git/
|
| 8 |
+
__pycache__/
|
| 9 |
+
*.pyc
|
| 10 |
+
.env
|
| 11 |
+
logs/
|
| 12 |
+
tmp/
|
| 13 |
+
usr/
|
| 14 |
+
knowledge/
|
| 15 |
+
python/
|
| 16 |
+
searxng/
|
| 17 |
+
webui/
|
| 18 |
+
node_modules/
|
| 19 |
+
.hfignore
|
| 20 |
+
deploy_to_hf.py
|
Dockerfile
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Use a specific Python 3.12 image for compatibility
FROM python:3.12-slim-bookworm

LABEL description="Dockerfile for Agent-Zero on Hugging Face Spaces"

# Avoid prompts during package installation
ENV DEBIAN_FRONTEND=noninteractive

# Install system dependencies, including openssl for key generation and rsync
RUN apt-get update && apt-get install -y \
    build-essential \
    gcc \
    g++ \
    gfortran \
    libffi-dev \
    libssl-dev \
    libopenblas-dev \
    pkg-config \
    git \
    curl \
    openssl \
    procps \
    rsync \
    && rm -rf /var/lib/apt/lists/*

# Use the official Ollama installation script
RUN curl -fsSL https://ollama.com/install.sh | sh

# Set the working directory
WORKDIR /app

# Copy requirements.txt first so dependency layers are cached independently
# of source changes
COPY requirements.txt .
RUN pip install --no-cache-dir --upgrade pip wheel setuptools
RUN pip install --no-cache-dir -r requirements.txt

# Clone the agent-zero repository.
RUN git clone --depth 1 https://github.com/agent0ai/agent-zero.git /agent-zero-base && \
    rsync -av /agent-zero-base/ /app/ && \
    rm -rf /agent-zero-base

# Clone the anthropics skills repository and integrate skills.
RUN git clone --depth 1 https://github.com/anthropics/skills.git /anthropic-skills && \
    mkdir -p /app/usr/skills && \
    cp -r /anthropic-skills/skills/* /app/usr/skills/ && \
    rm -rf /anthropic-skills

# Overlay the local files on top of the files cloned from the repository
COPY run_ui.py /app/run_ui.py
COPY webui/js/api.js /app/webui/js/api.js
COPY webui/index.html /app/webui/index.html
COPY webui/js/index.js /app/webui/js/index.js
COPY preload.py /app/preload.py
COPY python/extensions/system_prompt/_10_system_prompt.py /app/python/extensions/system_prompt/_10_system_prompt.py
COPY python/helpers/searxng.py /app/python/helpers/searxng.py
COPY python/helpers/settings.py /app/python/helpers/settings.py
COPY python/helpers/csrf.py /app/python/helpers/csrf.py
COPY python/api/csrf_token.py /app/python/api/csrf_token.py
COPY start.sh /app/start.sh
COPY python/tools/search_engine.py /app/python/tools/search_engine.py
COPY initialize.py /app/initialize.py

# Generate a stable FLASK_SECRET_KEY at build time. A permanent .env baked
# into the image eliminates runtime race conditions around key creation.
RUN echo "FLASK_SECRET_KEY=$(openssl rand -hex 32)" > .env

# Declare build-time arguments (Hugging Face injects Space secrets here)
ARG JULES_API_KEY
ARG GITHUB_PERSONAL_ACCESS_TOKEN
ARG HUGGINGFACE_API_KEY

# SECURITY NOTE(review): persisting build args via ENV writes the secret
# values into the image config/history — anyone able to pull the image can
# read them. Prefer runtime secrets if the Space setup allows it.
ENV JULES_API_KEY=$JULES_API_KEY
ENV GITHUB_PERSONAL_ACCESS_TOKEN=$GITHUB_PERSONAL_ACCESS_TOKEN
ENV HUGGINGFACE_API_KEY=$HUGGINGFACE_API_KEY

# Manually create the 'ollama' group
RUN groupadd -r ollama

# Create a non-root user for security
RUN useradd --create-home --shell /bin/bash user

# Add the user to the 'ollama' group so it can use the service
RUN usermod -aG ollama user

# Grant the non-root user ownership of the application directory
RUN chown -R user:user /app

# Make start.sh executable
RUN chmod +x /app/start.sh

# Switch to the non-root user
USER user

# Set the final working directory
WORKDIR /app

# Expose the application port
EXPOSE 5000

# Command to start the services
CMD ["/app/start.sh"]
|
README.md
CHANGED
|
@@ -1,10 +1,95 @@
|
|
| 1 |
---
|
| 2 |
-
title: Agent
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
colorTo: yellow
|
| 6 |
sdk: docker
|
|
|
|
| 7 |
pinned: false
|
|
|
|
| 8 |
---
|
| 9 |
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Agent with Claude Skills
|
| 3 |
+
emoji: 😻
|
| 4 |
+
colorFrom: gray
|
| 5 |
colorTo: yellow
|
| 6 |
sdk: docker
|
| 7 |
+
app_port: 5000
|
| 8 |
pinned: false
|
| 9 |
+
short_description: A powerful and flexible agent framework with a web-based UI.
|
| 10 |
---
|
| 11 |
|
| 12 |
+
## 🚀 Agent with Claude Skills
|
| 13 |
+
|
| 14 |
+
This project provides a robust and extensible agent framework with a user-friendly web interface. It's designed to be highly configurable, allowing you to easily integrate with different large language models (LLMs), embedding models, and other services.
|
| 15 |
+
|
| 16 |
+
### ✨ Features
|
| 17 |
+
|
| 18 |
+
- **Web-based UI:** A clean and intuitive web interface for interacting with the agent.
|
| 19 |
+
- **Dockerized Environment:** The entire application is containerized with Docker, ensuring a consistent and reproducible environment.
|
| 20 |
+
- **Extensible API:** A flexible API that allows you to add new tools and capabilities to the agent.
|
| 21 |
+
- **Configurable Models:** Easily configure the chat, utility, and embedding models through a simple settings file.
|
| 22 |
+
- **Multi-agent Communication:** Supports multi-agent communication through the Multi-agent Communication Protocol (MCP).
|
| 23 |
+
- **Agent-to-Agent Communication:** Enables direct communication between agents using the Agent-to-Agent (A2A) protocol.
|
| 24 |
+
- **Authentication:** Secure your agent with basic authentication and API key protection.
|
| 25 |
+
- **CSRF Protection:** Built-in Cross-Site Request Forgery (CSRF) protection for enhanced security.
|
| 26 |
+
- **Dynamic Settings:** Modify the agent's settings on the fly without restarting the application.
|
| 27 |
+
|
| 28 |
+
### 🛠️ Getting Started
|
| 29 |
+
|
| 30 |
+
#### Prerequisites
|
| 31 |
+
|
| 32 |
+
- [Docker](https://docs.docker.com/get-docker/)
|
| 33 |
+
- [Docker Compose](https://docs.docker.com/compose/install/) (optional, for local development)
|
| 34 |
+
|
| 35 |
+
#### Running the Application
|
| 36 |
+
|
| 37 |
+
1. **Clone the repository:**
|
| 38 |
+
|
| 39 |
+
```bash
|
| 40 |
+
git clone https://huggingface.co/spaces/harvesthealth/agent-with-claude-skills
|
| 41 |
+
cd agent-with-claude-skills
|
| 42 |
+
```
|
| 43 |
+
|
| 44 |
+
2. **Build and run the Docker container:**
|
| 45 |
+
|
| 46 |
+
```bash
|
| 47 |
+
docker build -t agent-with-claude-skills .
|
| 48 |
+
docker run -p 5000:5000 agent-with-claude-skills
|
| 49 |
+
```
|
| 50 |
+
|
| 51 |
+
3. **Access the web UI:**
|
| 52 |
+
|
| 53 |
+
Open your web browser and navigate to `http://localhost:5000`.
|
| 54 |
+
|
| 55 |
+
#### Configuration
|
| 56 |
+
|
| 57 |
+
The application's settings are managed in the `python/helpers/settings.py` file. You can modify this file to change the default models, API keys, and other parameters.
|
| 58 |
+
|
| 59 |
+
**Important:** For production environments, it's highly recommended to use a `.env` file to manage your secrets and API keys.
|
| 60 |
+
|
| 61 |
+
### 🔧 Modifying the API Base URL
|
| 62 |
+
|
| 63 |
+
By default, the `chat_model_api_base` and `util_model_api_base` are hardcoded in `python/helpers/settings.py`. To make this configurable, you can modify the file to use an environment variable.
|
| 64 |
+
|
| 65 |
+
1. **Open `python/helpers/settings.py`:**
|
| 66 |
+
|
| 67 |
+
2. **Locate the `get_default_settings` function:**
|
| 68 |
+
|
| 69 |
+
3. **Replace the hardcoded URLs with a variable:**
|
| 70 |
+
|
| 71 |
+
```python
|
| 72 |
+
def get_default_settings() -> Settings:
|
| 73 |
+
api_base_url = os.getenv("API_BASE_URL", "https://api.helmholtz-blablador.fz-juelich.de/v1")
|
| 74 |
+
return Settings(
|
| 75 |
+
version=_get_version(),
|
| 76 |
+
chat_model_provider="Other OpenAI compatible",
|
| 77 |
+
chat_model_name="alias-large",
|
| 78 |
+
chat_model_api_base=api_base_url,
|
| 79 |
+
...
|
| 80 |
+
util_model_provider="Other OpenAI compatible",
|
| 81 |
+
util_model_name="alias-large",
|
| 82 |
+
util_model_api_base=api_base_url,
|
| 83 |
+
...
|
| 84 |
+
)
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
4. **Create a `.env` file in the root of the project:**
|
| 88 |
+
|
| 89 |
+
```
|
| 90 |
+
API_BASE_URL=https://your-api-base-url.com/v1
|
| 91 |
+
```
|
| 92 |
+
|
| 93 |
+
### 🤝 Contributing
|
| 94 |
+
|
| 95 |
+
Contributions are welcome! Please feel free to submit a pull request or open an issue if you have any suggestions or find any bugs.
|
api_app.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI, HTTPException
|
| 2 |
+
from pydantic import BaseModel
|
| 3 |
+
from typing import Optional, List
|
| 4 |
+
import os
|
| 5 |
+
import asyncio
|
| 6 |
+
from agent import AgentContext, AgentContextType, UserMessage
|
| 7 |
+
import initialize
|
| 8 |
+
from python.helpers import runtime, dotenv
|
| 9 |
+
from python.helpers.print_style import PrintStyle
|
| 10 |
+
|
| 11 |
+
app = FastAPI(title="Skilled-Agent API")


class ChatRequest(BaseModel):
    """Inbound payload for the /chat endpoint."""
    message: str
    chat_id: Optional[str] = None
    attachments: Optional[List[str]] = None


class ChatResponse(BaseModel):
    """Outbound payload for the /chat endpoint."""
    response: str
    chat_id: str


@app.on_event("startup")
async def startup_event():
    """Bring up runtime state before the API starts serving requests."""
    PrintStyle().print("Initializing Skilled-Agent API...")
    runtime.initialize()
    dotenv.load_dotenv()

    # Run migrations if necessary. The local initialize.py shipped in this
    # commit does not define initialize_migration, so calling it directly
    # would raise AttributeError at startup — guard the lookup.
    migrate = getattr(initialize, "initialize_migration", None)
    if migrate is not None:
        migrate()

    # Restore persisted chats synchronously before accepting traffic.
    init_chats = initialize.initialize_chats()
    init_chats.result_sync()

    # Initialize MCP in the background.
    initialize.initialize_mcp()

    # Start the background job loop.
    initialize.initialize_job_loop()

    # Warm model caches in the background.
    initialize.initialize_preload()

    PrintStyle().print("Skilled-Agent API started.")


@app.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest):
    """Send a message to an existing or new agent context and await the reply.

    Raises 404 for an unknown chat_id, 400 for an empty message and 500 for
    any failure inside the agent loop.
    """
    context = None
    if request.chat_id:
        context = AgentContext.get(request.chat_id)
        if not context:
            raise HTTPException(status_code=404, detail=f"Chat session {request.chat_id} not found")
    else:
        # Create a fresh background context for a brand-new conversation.
        config = initialize.initialize_agent()
        context = AgentContext(config=config, type=AgentContextType.BACKGROUND)

    if not request.message:
        raise HTTPException(status_code=400, detail="Message is required")

    try:
        PrintStyle().print(f"Processing message for chat {context.id}...")
        task = context.communicate(
            UserMessage(
                message=request.message,
                attachments=request.attachments or []
            )
        )
        # Wait for the agent to finish and return the final message
        result = await task.result()
        return ChatResponse(response=result, chat_id=context.id)
    except Exception as e:
        PrintStyle().error(f"Error in chat: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/health")
async def health():
    """Liveness probe used by the hosting platform."""
    return {"status": "healthy"}
|
check_secrets.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
# Secrets commonly consumed by Agent-Zero and its integrated skills.
COMMON_SECRETS = [
    "ANTHROPIC_API_KEY",
    "OPENAI_API_KEY",
    "GITHUB_PERSONAL_ACCESS_TOKEN",
    "HUGGINGFACE_API_KEY",
    "GOOGLE_API_KEY",
    "SERPAPI_API_KEY",
    "SEARXNG_URL"
]


def check_secrets():
    """Report which well-known secrets are present in the environment.

    Prints an informational line for secrets that are set and a warning
    listing the ones that are missing. Returns None.
    """
    print("=== Skilled-Agent Secret Check ===")
    available = [name for name in COMMON_SECRETS if os.getenv(name)]
    missing = [name for name in COMMON_SECRETS if not os.getenv(name)]

    if available:
        print(f"INFO: Available secrets in environment: {', '.join(available)}")

    if missing:
        print(f"WARNING: The following secrets are NOT set: {', '.join(missing)}")
        print("Please configure them in your Hugging Face Space Settings -> Secrets if your tasks require them.")

    print("==================================")


if __name__ == "__main__":
    check_secrets()
|
initialize.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from agent import AgentConfig
|
| 2 |
+
import models
|
| 3 |
+
from python.helpers import runtime, settings, defer
|
| 4 |
+
from python.helpers.print_style import PrintStyle
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def initialize_agent():
    """Build an AgentConfig from the current user settings.

    Constructs the chat, utility, embedding and browser model configurations,
    applies runtime (SSH/docker) settings and CLI argument overrides, and
    returns the finished config object. (The large block of commented-out MCP
    bootstrap code that used to live here has been removed as dead code; MCP
    is initialized via initialize_mcp().)
    """
    current_settings = settings.get_settings()

    def _normalize_model_kwargs(kwargs: dict) -> dict:
        """Convert string values that represent valid Python numbers to numeric types."""
        result = {}
        for key, value in kwargs.items():
            if isinstance(value, str):
                # try int first, then float; keep the string when neither fits
                try:
                    result[key] = int(value)
                except ValueError:
                    try:
                        result[key] = float(value)
                    except ValueError:
                        result[key] = value
            else:
                result[key] = value
        return result

    # chat model from user settings
    chat_llm = models.ModelConfig(
        type=models.ModelType.CHAT,
        provider=current_settings["chat_model_provider"],
        name=current_settings["chat_model_name"],
        api_base=current_settings["chat_model_api_base"],
        ctx_length=current_settings["chat_model_ctx_length"],
        vision=current_settings["chat_model_vision"],
        limit_requests=current_settings["chat_model_rl_requests"],
        limit_input=current_settings["chat_model_rl_input"],
        limit_output=current_settings["chat_model_rl_output"],
        kwargs=_normalize_model_kwargs(current_settings["chat_model_kwargs"]),
    )

    # utility model from user settings
    utility_llm = models.ModelConfig(
        type=models.ModelType.CHAT,
        provider=current_settings["util_model_provider"],
        name=current_settings["util_model_name"],
        api_base=current_settings["util_model_api_base"],
        ctx_length=current_settings["util_model_ctx_length"],
        limit_requests=current_settings["util_model_rl_requests"],
        limit_input=current_settings["util_model_rl_input"],
        limit_output=current_settings["util_model_rl_output"],
        kwargs=_normalize_model_kwargs(current_settings["util_model_kwargs"]),
    )

    # embedding model from user settings
    embedding_llm = models.ModelConfig(
        type=models.ModelType.EMBEDDING,
        provider=current_settings["embed_model_provider"],
        name=current_settings["embed_model_name"],
        api_base=current_settings["embed_model_api_base"],
        limit_requests=current_settings["embed_model_rl_requests"],
        kwargs=_normalize_model_kwargs(current_settings["embed_model_kwargs"]),
    )

    # browser model from user settings
    browser_llm = models.ModelConfig(
        type=models.ModelType.CHAT,
        provider=current_settings["browser_model_provider"],
        name=current_settings["browser_model_name"],
        api_base=current_settings["browser_model_api_base"],
        vision=current_settings["browser_model_vision"],
        kwargs=_normalize_model_kwargs(current_settings["browser_model_kwargs"]),
    )

    # agent configuration
    config = AgentConfig(
        chat_model=chat_llm,
        utility_model=utility_llm,
        embeddings_model=embedding_llm,
        browser_model=browser_llm,
        profile=current_settings["agent_profile"],
        memory_subdir=current_settings["agent_memory_subdir"],
        knowledge_subdirs=[current_settings["agent_knowledge_subdir"], "default"],
        mcp_servers=current_settings.get("mcp_servers"),
        # code_exec params get initialized in _set_runtime_config
    )

    # update SSH and docker settings
    _set_runtime_config(config, current_settings)

    # update config with runtime args
    _args_override(config)

    return config
|
| 118 |
+
|
| 119 |
+
def initialize_chats():
    """Load persisted temporary chats in a background deferred task."""
    from python.helpers import persist_chat

    async def _load_chats():
        persist_chat.load_tmp_chats()

    return defer.DeferredTask().start_task(_load_chats)
|
| 124 |
+
|
| 125 |
+
def initialize_mcp():
    """Initialize configured MCP servers in a background deferred task.

    Settings are read eagerly so the deferred task captures a stable
    snapshot. (The local variable was renamed from `set`, which shadowed
    the builtin.)
    """
    current_settings = settings.get_settings()

    async def initialize_mcp_async():
        from python.helpers.mcp_handler import initialize_mcp as _initialize_mcp
        return _initialize_mcp(current_settings["mcp_servers"])

    return defer.DeferredTask().start_task(initialize_mcp_async)
|
| 131 |
+
|
| 132 |
+
def initialize_job_loop():
    """Start the background job loop in its own named deferred task."""
    from python.helpers.job_loop import run_loop

    return defer.DeferredTask("JobLoop").start_task(run_loop)
|
| 135 |
+
|
| 136 |
+
def initialize_preload():
    """Warm model caches in the background (see preload.preload)."""
    import preload

    return defer.DeferredTask().start_task(preload.preload)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def _args_override(config):
    """Overlay runtime CLI arguments onto matching config attributes.

    Each runtime arg value arrives as a string and is coerced to the type of
    the attribute it replaces; attributes whose current type is not bool,
    int, float or str raise.
    """
    for key, value in runtime.args.items():
        if not hasattr(config, key):
            continue
        current = getattr(config, key)
        # bool must be tested before int: bool is a subclass of int
        if isinstance(current, bool):
            coerced = value.lower().strip() == "true"
        elif isinstance(current, int):
            coerced = int(value)
        elif isinstance(current, float):
            coerced = float(value)
        elif isinstance(current, str):
            coerced = str(value)
        else:
            raise Exception(
                f"Unsupported argument type of '{key}': {type(getattr(config, key))}"
            )
        setattr(config, key, coerced)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def _set_runtime_config(config: AgentConfig, current_settings: settings.Settings):
    """Copy SSH/docker runtime options from settings onto the agent config.

    Only attributes that already exist on *config* are assigned. (The second
    parameter was renamed from `set`, which shadowed the builtin; the only
    visible caller passes it positionally.)
    """
    runtime_conf = settings.get_runtime_config(current_settings)
    for key, value in runtime_conf.items():
        if hasattr(config, key):
            setattr(config, key, value)
|
knowledge/custom/TED Podcasts.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ffc5a579932dc6ea1e6c50b386f81c8fd0e9bd2cb4501a4a6856d11623a5bf11
|
| 3 |
+
size 237793
|
preload.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
from python.helpers import runtime, whisper, settings
|
| 3 |
+
from python.helpers.print_style import PrintStyle
|
| 4 |
+
from python.helpers import kokoro_tts
|
| 5 |
+
import models
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
async def preload():
    """Warm up optional models (embedding, whisper, kokoro TTS) at startup.

    Every sub-task logs and swallows its own failure so a missing or broken
    model never blocks boot. (The settings variable was renamed from `set`,
    which shadowed the builtin.)
    """
    PrintStyle().print("Preloading models...")
    try:
        cfg = settings.get_default_settings()

        # preload whisper speech-to-text model
        async def preload_whisper():
            try:
                return await whisper.preload(cfg["stt_model_size"])
            except Exception as e:
                PrintStyle().error(f"Error in preload_whisper: {e}")

        # preload embedding model
        async def preload_embedding():
            if cfg["embed_model_provider"].lower() == "huggingface":
                try:
                    # Use the new LiteLLM-based model system
                    emb_mod = models.get_embedding_model(
                        "huggingface", cfg["embed_model_name"]
                    )
                    emb_txt = await emb_mod.aembed_query("test")
                    return emb_txt
                except Exception as e:
                    PrintStyle().error(f"Error in preload_embedding: {e}")

        # preload kokoro tts model if enabled
        async def preload_kokoro():
            if cfg["tts_kokoro"]:
                try:
                    return await kokoro_tts.preload()
                except Exception as e:
                    PrintStyle().error(f"Error in preload_kokoro: {e}")

        # async tasks to preload; whisper and kokoro are currently disabled
        tasks = [
            preload_embedding(),
            # preload_whisper(),
            # preload_kokoro()
        ]

        await asyncio.gather(*tasks, return_exceptions=True)
        PrintStyle().print("Preload completed")
    except Exception as e:
        PrintStyle().error(f"Error in preload: {e}")


# run preload standalone (e.g. at Docker build time)
if __name__ == "__main__":
    PrintStyle().print("Running preload...")
    runtime.initialize()
    asyncio.run(preload())
|
python/api/csrf_token.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from python.helpers.api import (
|
| 2 |
+
ApiHandler,
|
| 3 |
+
Input,
|
| 4 |
+
Output,
|
| 5 |
+
Request,
|
| 6 |
+
Response,
|
| 7 |
+
session,
|
| 8 |
+
)
|
| 9 |
+
from python.helpers import runtime, csrf
|
| 10 |
+
|
| 11 |
+
class GetCsrfToken(ApiHandler):
    """API endpoint that hands out a fresh CSRF token and the runtime id."""

    @classmethod
    def get_methods(cls) -> list[str]:
        return ["GET"]

    @classmethod
    def requires_csrf(cls) -> bool:
        # Fetching the token itself must not require a token.
        return False

    async def process(self, input: Input, request: Request) -> Output:
        """Generate a token and return it alongside the runtime id."""
        return {
            "token": csrf.generate_csrf_token(),
            "runtime_id": runtime.get_runtime_id(),
        }
|
python/extensions/system_prompt/_10_system_prompt.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
from python.helpers.extension import Extension
|
| 3 |
+
from python.helpers.mcp_handler import MCPConfig
|
| 4 |
+
from agent import Agent, LoopData
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class SystemPrompt(Extension):
    """Assemble the agent's system prompt from main, tool and MCP sections."""

    async def execute(
        self,
        system_prompt: list[str] | None = None,
        loop_data: LoopData | None = None,
        **kwargs: Any,
    ):
        """Append prompt sections to *system_prompt* (mutated in place).

        The defaults were `[]` and `LoopData()` — mutable default arguments
        that are shared across calls; they are replaced with None sentinels.
        """
        if system_prompt is None:
            system_prompt = []

        # append main system prompt and tools
        main = get_main_prompt(self.agent)
        tools = get_tools_prompt(self.agent)
        mcp_tools = get_mcp_tools_prompt(self.agent)
        final_instruction = "When you are done with the task, use the 'response' tool to return the final answer."

        system_prompt.append(main)
        system_prompt.append(tools)
        if mcp_tools:
            system_prompt.append(mcp_tools)
        system_prompt.append(final_instruction)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def get_main_prompt(agent: Agent):
    """Read the main system prompt template for *agent*."""
    return agent.read_prompt("agent.system.main.md")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def get_tools_prompt(agent: Agent):
    """Read the tools prompt, appending vision tools when the chat model supports vision."""
    prompt = agent.read_prompt("agent.system.tools.md")
    if agent.config.chat_model.vision:
        prompt = prompt + '\n\n' + agent.read_prompt("agent.system.tools_vision.md")
    return prompt
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def get_mcp_tools_prompt(agent: Agent):
    """Return the MCP tools prompt, or "" when no MCP servers are configured.

    Temporarily swaps the progress message because MCP may still be
    initializing when the prompt is collected. Fixes two small defects:
    the singleton was fetched twice, and the original progress message was
    not restored if get_tools_prompt() raised.
    """
    mcp_config = MCPConfig.get_instance()
    if not mcp_config.servers:
        return ""
    pre_progress = agent.context.log.progress
    agent.context.log.set_progress("Collecting MCP tools")  # inform the user via progress bar
    try:
        tools = mcp_config.get_tools_prompt()
    finally:
        agent.context.log.set_progress(pre_progress)  # return original progress
    return tools
|
| 43 |
+
|
python/helpers/csrf.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import secrets
|
| 2 |
+
import hmac
|
| 3 |
+
import hashlib
|
| 4 |
+
import time
|
| 5 |
+
|
| 6 |
+
# Per-process HMAC key; tokens intentionally do not survive a restart.
CSRF_SECRET = secrets.token_bytes(32)
TOKEN_TTL = 3600  # token lifetime in seconds (1 hour)


def generate_csrf_token():
    """Create a signed CSRF token of the form "<nonce>:<timestamp>.<hmac-hex>"."""
    nonce = secrets.token_hex(16)  # 128 bits of randomness
    timestamp = str(int(time.time()))
    payload = f"{nonce}:{timestamp}"
    signature = hmac.new(CSRF_SECRET, payload.encode(), hashlib.sha256).hexdigest()
    return f"{payload}.{signature}"


def verify_csrf_token(token):
    """Return True iff *token* carries a valid signature and has not expired.

    Any malformed token (wrong shape, bad timestamp, missing separator)
    is rejected rather than raising.
    """
    try:
        payload, signature = token.rsplit(".", 1)
        expected = hmac.new(CSRF_SECRET, payload.encode(), hashlib.sha256).hexdigest()
        # constant-time comparison to avoid timing side channels
        if not hmac.compare_digest(signature, expected):
            return False
        _nonce, timestamp = payload.split(":")
        return time.time() - int(timestamp) <= TOKEN_TTL
    except Exception:
        return False
|
python/helpers/mcp_server.py
ADDED
|
@@ -0,0 +1,433 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import Annotated, Literal, Union
|
| 3 |
+
from urllib.parse import urlparse
|
| 4 |
+
from openai import BaseModel
|
| 5 |
+
from pydantic import Field
|
| 6 |
+
from fastmcp import FastMCP
|
| 7 |
+
|
| 8 |
+
from agent import AgentContext, AgentContextType, UserMessage
|
| 9 |
+
from python.helpers.persist_chat import remove_chat
|
| 10 |
+
from initialize import initialize_agent
|
| 11 |
+
from python.helpers.print_style import PrintStyle
|
| 12 |
+
from python.helpers import settings
|
| 13 |
+
from starlette.middleware import Middleware
|
| 14 |
+
from starlette.middleware.base import BaseHTTPMiddleware
|
| 15 |
+
from starlette.exceptions import HTTPException as StarletteHTTPException
|
| 16 |
+
from starlette.types import ASGIApp, Receive, Scope, Send
|
| 17 |
+
from fastmcp.server.http import create_sse_app
|
| 18 |
+
from starlette.requests import Request
|
| 19 |
+
import threading
|
| 20 |
+
|
| 21 |
+
_PRINTER = PrintStyle(italic=True, font_color="green", padding=False)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# Singleton FastMCP server instance exposed to remote MCP clients.
# The instructions string is sent verbatim to connecting clients.
mcp_server: FastMCP = FastMCP(
    name="Agent Zero integrated MCP Server",
    instructions="""
Connect to remote Agent Zero instance.
Agent Zero is a general AI assistant controlling it's linux environment.
Agent Zero can install software, manage files, execute commands, code, use internet, etc.
Agent Zero's environment is isolated unless configured otherwise.
""",
)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
# Success payload returned by the MCP tools below.
# (Deliberately no class docstring: pydantic surfaces docstrings in the schema.)
class ToolResponse(BaseModel):
    status: Literal["success"] = Field(
        default="success", description="The status of the response"
    )
    response: str = Field(
        description="The response from the remote Agent Zero Instance"
    )
    chat_id: str = Field(description="The id of the chat this message belongs to.")
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# Error payload returned by the MCP tools below.
# (Deliberately no class docstring: pydantic surfaces docstrings in the schema.)
class ToolError(BaseModel):
    status: Literal["error"] = Field(
        default="error", description="The status of the response"
    )
    error: str = Field(
        description="The error message from the remote Agent Zero Instance"
    )
    chat_id: str = Field(description="The id of the chat this message belongs to.")
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
SEND_MESSAGE_DESCRIPTION = """
Send a message to the remote Agent Zero Instance.
This tool is used to send a message to the remote Agent Zero Instance connected remotely via MCP.
"""


@mcp_server.tool(
    name="send_message",
    description=SEND_MESSAGE_DESCRIPTION,
    tags={
        "agent_zero",
        "chat",
        "remote",
        "communication",
        "dialogue",
        "sse",
        "send",
        "message",
        "start",
        "new",
        "continue",
    },
    annotations={
        "remote": True,
        "readOnlyHint": False,
        "destructiveHint": False,
        "idempotentHint": False,
        "openWorldHint": False,
        "title": SEND_MESSAGE_DESCRIPTION,
    },
)
async def send_message(
    message: Annotated[
        str,
        Field(
            description="The message to send to the remote Agent Zero Instance",
            title="message",
        ),
    ],
    attachments: (
        Annotated[
            list[str],
            Field(
                description="Optional: A list of attachments (file paths or web urls) to send to the remote Agent Zero Instance with the message. Default: Empty list",
                title="attachments",
            ),
        ]
        | None
    ) = None,
    chat_id: (
        Annotated[
            str,
            Field(
                description="Optional: ID of the chat. Used to continue a chat. This value is returned in response to sending previous message. Default: Empty string",
                title="chat_id",
            ),
        ]
        | None
    ) = None,
    persistent_chat: (
        Annotated[
            bool,
            Field(
                description="Optional: Whether to use a persistent chat. If true, the chat will be saved and can be continued later. Default: False.",
                title="persistent_chat",
            ),
        ]
        | None
    ) = None,
) -> Annotated[
    Union[ToolResponse, ToolError],
    Field(
        description="The response from the remote Agent Zero Instance", title="response"
    ),
]:
    # Resolve the conversation context: continue an existing chat or start a new one.
    context: AgentContext | None = None
    if chat_id:
        context = AgentContext.get(chat_id)
        if not context:
            return ToolError(error="Chat not found", chat_id=chat_id)
        # Continuing an existing conversation implies persistence: the chat
        # is kept (not deleted) after this message is handled.
        persistent_chat = True
    else:
        agent_config = initialize_agent()
        context = AgentContext(config=agent_config, type=AgentContextType.BACKGROUND)

    if not message:
        return ToolError(
            error="Message is required", chat_id=context.id if persistent_chat else ""
        )

    try:
        response = await _run_chat(context, message, attachments)
        if not persistent_chat:
            # Throwaway chat: tear down the context and any persisted traces.
            context.reset()
            AgentContext.remove(context.id)
            remove_chat(context.id)
        return ToolResponse(
            response=response, chat_id=context.id if persistent_chat else ""
        )
    except Exception as e:
        return ToolError(error=str(e), chat_id=context.id if persistent_chat else "")
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
FINISH_CHAT_DESCRIPTION = """
Finish a chat with the remote Agent Zero Instance.
This tool is used to finish a persistent chat (send_message with persistent_chat=True) with the remote Agent Zero Instance connected remotely via MCP.
If you want to continue the chat, use the send_message tool instead.
Always use this tool to finish persistent chat conversations with remote Agent Zero.
"""


@mcp_server.tool(
    name="finish_chat",
    description=FINISH_CHAT_DESCRIPTION,
    tags={
        "agent_zero",
        "chat",
        "remote",
        "communication",
        "dialogue",
        "sse",
        "finish",
        "close",
        "end",
        "stop",
    },
    annotations={
        "remote": True,
        "readOnlyHint": False,
        "destructiveHint": True,
        "idempotentHint": False,
        "openWorldHint": False,
        "title": FINISH_CHAT_DESCRIPTION,
    },
)
async def finish_chat(
    chat_id: Annotated[
        str,
        Field(
            description="ID of the chat to be finished. This value is returned in response to sending previous message.",
            title="chat_id",
        ),
    ]
) -> Annotated[
    Union[ToolResponse, ToolError],
    Field(
        description="The response from the remote Agent Zero Instance", title="response"
    ),
]:
    # Guard clauses: reject missing or unknown chat ids up front.
    if not chat_id:
        return ToolError(error="Chat ID is required", chat_id="")

    context = AgentContext.get(chat_id)
    if not context:
        return ToolError(error="Chat not found", chat_id=chat_id)

    # Tear down the context and remove the persisted chat data.
    context.reset()
    AgentContext.remove(context.id)
    remove_chat(context.id)
    return ToolResponse(response="Chat finished", chat_id=chat_id)
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
async def _run_chat(
    context: AgentContext, message: str, attachments: list[str] | None = None
):
    """Forward *message* (plus any valid attachments) to the agent and await its reply.

    Attachments are kept only when they point at an existing local path or a
    http(s)/ftp(s)/sftp URL; anything else is skipped with a log line.

    Raises:
        RuntimeError: when the underlying agent task fails (original exception chained).
    """
    try:
        _PRINTER.print("MCP Chat message received")

        # Filter attachments down to local files and supported URL schemes.
        attachment_filenames = []
        if attachments:
            for attachment in attachments:
                if os.path.exists(attachment):
                    attachment_filenames.append(attachment)
                else:
                    try:
                        url = urlparse(attachment)
                        if url.scheme in ["http", "https", "ftp", "ftps", "sftp"]:
                            attachment_filenames.append(attachment)
                        else:
                            _PRINTER.print(f"Skipping attachment: [{attachment}]")
                    except Exception:
                        _PRINTER.print(f"Skipping attachment: [{attachment}]")

        _PRINTER.print("User message:")
        _PRINTER.print(f"> {message}")
        if attachment_filenames:
            _PRINTER.print("Attachments:")
            for filename in attachment_filenames:
                # Fix: log the actual filename (the loop variable was unused and
                # a static placeholder was printed for every attachment).
                _PRINTER.print(f"- {filename}")

        task = context.communicate(
            UserMessage(
                message=message, system_message=[], attachments=attachment_filenames
            )
        )
        result = await task.result()

        # Success
        _PRINTER.print(f"MCP Chat message completed: {result}")

        return result

    except Exception as e:
        # Error: log and re-raise with the original exception chained as the cause.
        _PRINTER.print(f"MCP Chat message failed: {e}")

        raise RuntimeError(f"MCP Chat message failed: {e}") from e
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
class DynamicMcpProxy:
    """A dynamic proxy that allows swapping the underlying MCP applications on the fly."""
    # Fix: the line above used to sit *after* the ``_instance`` attribute, where
    # it was a dead string statement rather than the class docstring.

    _instance: "DynamicMcpProxy | None" = None

    def __init__(self):
        cfg = settings.get_settings()
        self.token = ""
        self.sse_app: ASGIApp | None = None
        self.http_app: ASGIApp | None = None
        self.http_session_manager = None
        self.http_session_task_group = None
        self._lock = threading.RLock()  # RLock so re-entrant calls cannot deadlock
        self.reconfigure(cfg["mcp_server_token"])

    @staticmethod
    def get_instance():
        """Lazily create and return the process-wide proxy singleton."""
        if DynamicMcpProxy._instance is None:
            DynamicMcpProxy._instance = DynamicMcpProxy()
        return DynamicMcpProxy._instance

    def reconfigure(self, token: str):
        """Rebuild the SSE and HTTP sub-apps under URL paths derived from *token*.

        No-op when the token is unchanged.
        """
        if self.token == token:
            return

        # NOTE(review): self.token is updated outside the lock while __call__
        # reads it for routing — a request racing a reconfigure may 403; confirm
        # reconfigure only happens at startup/settings-save.
        self.token = token
        sse_path = f"/t-{self.token}/sse"
        http_path = f"/t-{self.token}/http"
        message_path = f"/t-{self.token}/messages/"

        # Update settings in the MCP server instance if provided
        mcp_server.settings.message_path = message_path
        mcp_server.settings.sse_path = sse_path

        # Create new MCP apps with updated settings
        with self._lock:
            self.sse_app = create_sse_app(
                server=mcp_server,
                message_path=mcp_server.settings.message_path,
                sse_path=mcp_server.settings.sse_path,
                auth_server_provider=mcp_server._auth_server_provider,
                auth_settings=mcp_server.settings.auth,
                debug=mcp_server.settings.debug,
                routes=mcp_server._additional_http_routes,
                middleware=[Middleware(BaseHTTPMiddleware, dispatch=mcp_middleware)],
            )

            # For HTTP, we need to create a custom app since the lifespan manager
            # doesn't work properly in our Flask/Werkzeug environment
            self.http_app = self._create_custom_http_app(
                http_path,
                mcp_server._auth_server_provider,
                mcp_server.settings.auth,
                mcp_server.settings.debug,
                mcp_server._additional_http_routes,
            )

    def _create_custom_http_app(self, streamable_http_path, auth_server_provider, auth_settings, debug, routes):
        """Create a custom HTTP app that manages the session manager manually."""
        from fastmcp.server.http import setup_auth_middleware_and_routes, create_base_app
        from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
        from starlette.routing import Mount
        from mcp.server.auth.middleware.bearer_auth import RequireAuthMiddleware
        import anyio

        server_routes = []
        server_middleware = []

        self.http_session_task_group = None

        # Create session manager
        self.http_session_manager = StreamableHTTPSessionManager(
            app=mcp_server._mcp_server,
            event_store=None,
            json_response=True,
            stateless=False,
        )

        # Custom ASGI handler that ensures task group is initialized
        async def handle_streamable_http(scope, receive, send):
            # Lazy initialization of task group.
            # NOTE(review): not guarded by a lock — two concurrent first requests
            # could both enter here; confirm this is acceptable or add locking.
            if self.http_session_task_group is None:
                self.http_session_task_group = anyio.create_task_group()
                await self.http_session_task_group.__aenter__()
                if self.http_session_manager:
                    self.http_session_manager._task_group = self.http_session_task_group

            if self.http_session_manager:
                await self.http_session_manager.handle_request(scope, receive, send)

        # Get auth middleware and routes
        auth_middleware, auth_routes, required_scopes = setup_auth_middleware_and_routes(
            auth_server_provider, auth_settings
        )

        server_routes.extend(auth_routes)
        server_middleware.extend(auth_middleware)

        # Add StreamableHTTP routes with or without auth
        if auth_server_provider:
            server_routes.append(
                Mount(
                    streamable_http_path,
                    app=RequireAuthMiddleware(handle_streamable_http, required_scopes),
                )
            )
        else:
            server_routes.append(
                Mount(
                    streamable_http_path,
                    app=handle_streamable_http,
                )
            )

        # Add custom routes with lowest precedence
        if routes:
            server_routes.extend(routes)

        # Add middleware
        server_middleware.append(Middleware(BaseHTTPMiddleware, dispatch=mcp_middleware))

        # Create and return the app
        return create_base_app(
            routes=server_routes,
            middleware=server_middleware,
            debug=debug,
        )

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        """Forward the ASGI calls to the appropriate app based on the URL path"""
        with self._lock:
            sse_app = self.sse_app
            http_app = self.http_app

        if not sse_app or not http_app:
            raise RuntimeError("MCP apps not initialized")

        # Route based on path
        path = scope.get("path", "")

        if f"/t-{self.token}/sse" in path or f"t-{self.token}/messages" in path:
            # Route to SSE app
            await sse_app(scope, receive, send)
        elif f"/t-{self.token}/http" in path:
            # Route to HTTP app
            await http_app(scope, receive, send)
        else:
            # Unknown or wrong-token path: refuse.
            raise StarletteHTTPException(
                status_code=403, detail="MCP forbidden"
            )
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
async def mcp_middleware(request: Request, call_next):
    """Reject every MCP request with 403 while the MCP server is disabled in settings."""
    current_settings = settings.get_settings()
    if not current_settings["mcp_server_enabled"]:
        PrintStyle.error("[MCP] Access denied: MCP server is disabled in settings.")
        raise StarletteHTTPException(
            status_code=403, detail="MCP server is disabled in settings."
        )
    # Enabled: pass the request on to the wrapped app.
    return await call_next(request)
|
python/helpers/searxng.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import aiohttp
|
| 2 |
+
import asyncio
|
| 3 |
+
from python.helpers import runtime
|
| 4 |
+
|
| 5 |
+
# List of public SearxNG instances, tried in order until one responds.
# Find more at https://searx.space/
INSTANCES = [
    "https://searx.be/search",
    "https://searx.info/search",
    "https://searx.work/search",
    "https://searx.priv.au/search",
    "https://searx.tiekoetter.com/search",
    "https://searx.baczek.me/search",
    "https://searx.rodeo/search",
]

async def search(query: str):
    """Search the web via public SearxNG instances; returns the parsed JSON result."""
    return await _search(query=query)

async def _search(query: str):
    """Try each instance in INSTANCES until one returns a JSON payload.

    Raises:
        Exception: when every instance fails or returns non-JSON.
    """
    timeout = aiohttp.ClientTimeout(total=30)
    for instance in INSTANCES:
        try:
            async with aiohttp.ClientSession(timeout=timeout) as session:
                async with session.post(instance, data={"q": query, "format": "json"}) as response:
                    if response.status == 200:
                        try:
                            return await response.json()
                        except aiohttp.ContentTypeError:
                            # This instance is not returning JSON, so we try the next one
                            continue
        except (aiohttp.ClientError, asyncio.TimeoutError):
            # Robustness fix: catch any aiohttp client failure (connection
            # refused, server disconnect, bad response, ...), not only
            # ClientConnectorError, so failover to the next instance always
            # happens instead of propagating mid-list.
            continue
    # If all instances fail, we return an error
    raise Exception("All SearxNG instances failed to respond.")
|
python/helpers/settings.py
ADDED
|
@@ -0,0 +1,1602 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import base64
|
| 2 |
+
import hashlib
|
| 3 |
+
import json
|
| 4 |
+
import os
|
| 5 |
+
import re
|
| 6 |
+
import subprocess
|
| 7 |
+
from typing import Any, Literal, TypedDict, cast
|
| 8 |
+
|
| 9 |
+
import models
|
| 10 |
+
from python.helpers import runtime, whisper, defer, git
|
| 11 |
+
from . import files, dotenv
|
| 12 |
+
from python.helpers.print_style import PrintStyle
|
| 13 |
+
from python.helpers.providers import get_providers
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class Settings(TypedDict):
    """Complete, fully-populated Agent Zero settings record.

    All keys are required (``total=True`` by default); use
    ``PartialSettings`` for subsets.  Values mirror what the settings UI
    edits via ``convert_out``/``SettingsField``.
    """

    # settings schema/version string
    version: str

    # --- main chat model (primary LLM the agent talks with) ---
    chat_model_provider: str
    chat_model_name: str
    chat_model_api_base: str  # empty = provider default; used for Azure/local/custom
    chat_model_kwargs: dict[str, str]  # extra LiteLLM params (KEY=VALUE pairs)
    chat_model_ctx_length: int  # max tokens in the context window
    chat_model_ctx_history: float  # fraction of context reserved for chat history (0..1)
    chat_model_vision: bool  # model can process image attachments
    chat_model_rl_requests: int  # rate limit: requests/min, 0 = disabled
    chat_model_rl_input: int  # rate limit: input tokens/min, 0 = disabled
    chat_model_rl_output: int  # rate limit: output tokens/min, 0 = disabled

    # --- utility model (smaller/cheaper LLM for framework tasks) ---
    util_model_provider: str
    util_model_name: str
    util_model_api_base: str
    util_model_kwargs: dict[str, str]
    util_model_ctx_length: int
    util_model_ctx_input: float
    util_model_rl_requests: int
    util_model_rl_input: int
    util_model_rl_output: int

    # --- embedding model (vector DB / memory embeddings) ---
    embed_model_provider: str
    embed_model_name: str
    embed_model_api_base: str
    embed_model_kwargs: dict[str, str]
    embed_model_rl_requests: int
    embed_model_rl_input: int

    # --- web browser model (used by the browser-use framework) ---
    browser_model_provider: str
    browser_model_name: str
    browser_model_api_base: str
    browser_model_vision: bool  # analyze pages from screenshots
    browser_model_rl_requests: int
    browser_model_rl_input: int
    browser_model_rl_output: int
    browser_model_kwargs: dict[str, str]

    # --- agent configuration ---
    agent_profile: str  # subdirectory of /agents used by agent no. 0
    agent_memory_subdir: str  # subdirectory of /memory for this instance
    agent_knowledge_subdir: str  # subdirectory of /knowledge to import

    # --- memory auto-recall / auto-memorize tuning ---
    memory_recall_enabled: bool
    memory_recall_delayed: bool  # deliver recalled memories one message later
    memory_recall_interval: int  # recall every X turns of agent monologue
    memory_recall_history_len: int  # chars of history passed to recall LLM
    memory_recall_memories_max_search: int
    memory_recall_solutions_max_search: int
    memory_recall_memories_max_result: int
    memory_recall_solutions_max_result: int
    memory_recall_similarity_threshold: float  # 0 = any, 1 = exact match
    memory_recall_query_prep: bool  # utility-LLM query preparation
    memory_recall_post_filter: bool  # utility-LLM relevance filtering
    memory_memorize_enabled: bool
    memory_memorize_consolidation: bool  # utility-LLM memory consolidation
    memory_memorize_replace_threshold: float  # similarity needed to replace old memory

    # provider id -> API key(s); multiple keys may be comma-separated (round-robin)
    api_keys: dict[str, str]

    # --- web UI / system authentication ---
    auth_login: str
    auth_password: str
    root_password: str  # linux root password inside the docker container

    # --- remote function calls to a dockerized instance ---
    rfc_auto_docker: bool
    rfc_url: str  # destination URL, without port
    rfc_password: str  # must match on both instances
    rfc_port_http: int
    rfc_port_ssh: int

    # terminal backend for the code execution tool
    shell_interface: Literal['local','ssh']

    # --- speech-to-text (whisper) ---
    stt_model_size: str
    stt_language: str
    stt_silence_threshold: float
    stt_silence_duration: int
    stt_waiting_timeout: int

    # text-to-speech: enable the Kokoro engine
    tts_kokoro: bool

    # --- MCP (Model Context Protocol) client/server ---
    mcp_servers: str
    mcp_client_init_timeout: int
    mcp_client_tool_timeout: int
    mcp_server_enabled: bool
    mcp_server_token: str

    # A2A (agent-to-agent) server toggle
    a2a_server_enabled: bool
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class PartialSettings(Settings, total=False):
    """``Settings`` variant in which every key is optional.

    ``total=False`` makes all inherited keys non-required, so callers may
    supply any subset of the ``Settings`` keys.
    """
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class FieldOption(TypedDict):
    """One selectable option of a ``select`` settings field."""

    value: str  # machine-readable value stored into settings
    label: str  # human-readable text shown in the UI
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class SettingsField(TypedDict, total=False):
    """Schema of a single editable field rendered in the settings UI.

    ``total=False``: every key is optional; which keys are present depends
    on the field ``type`` (e.g. ``min``/``max``/``step`` appear on
    ``range`` fields, ``options`` on ``select`` fields).
    """

    id: str  # settings key this field maps to (e.g. "chat_model_name")
    title: str  # label shown next to the control
    description: str  # help text; may contain HTML markup
    type: Literal[
        "text",
        "number",
        "select",
        "range",
        "textarea",
        "password",
        "switch",
        "button",
        "html",
    ]
    value: Any  # current value; concrete type depends on "type"
    min: float  # lower bound ("range" fields)
    max: float  # upper bound ("range" fields)
    step: float  # slider step ("range" fields)
    hidden: bool  # field exists but is not displayed
    options: list[FieldOption]  # choices ("select" fields)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
class SettingsSection(TypedDict, total=False):
    """A titled group of related fields shown together in the settings UI."""

    id: str  # unique section identifier (e.g. "chat_model")
    title: str  # section heading
    description: str  # section help text; may contain HTML
    fields: list[SettingsField]  # fields rendered inside this section
    tab: str  # Indicates which tab this section belongs to
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
class SettingsOutput(TypedDict):
    """Top-level payload sent to the settings UI: the full list of sections."""

    sections: list[SettingsSection]
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
# Placeholder echoed back to the UI for password fields that already have a
# value, so the real secret is never sent to the client (see its use for
# auth_password / rfc_password fields).
PASSWORD_PLACEHOLDER = "****PSWD****"
# Same idea for API-key fields.
API_KEY_PLACEHOLDER = "************"

# On-disk location of the persisted settings JSON.
SETTINGS_FILE = files.get_abs_path("tmp/settings.json")
# Module-level cache of the loaded settings; None until populated
# (presumably by the first settings read — confirm in the loader).
_settings: Settings | None = None
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def convert_out(settings: Settings) -> SettingsOutput:
|
| 158 |
+
default_settings = get_default_settings()
|
| 159 |
+
|
| 160 |
+
# main model section
|
| 161 |
+
chat_model_fields: list[SettingsField] = []
|
| 162 |
+
chat_model_fields.append(
|
| 163 |
+
{
|
| 164 |
+
"id": "chat_model_provider",
|
| 165 |
+
"title": "Chat model provider",
|
| 166 |
+
"description": "Select provider for main chat model used by Agent Zero",
|
| 167 |
+
"type": "select",
|
| 168 |
+
"value": settings["chat_model_provider"],
|
| 169 |
+
"options": cast(list[FieldOption], get_providers("chat")),
|
| 170 |
+
}
|
| 171 |
+
)
|
| 172 |
+
chat_model_fields.append(
|
| 173 |
+
{
|
| 174 |
+
"id": "chat_model_name",
|
| 175 |
+
"title": "Chat model name",
|
| 176 |
+
"description": "Exact name of model from selected provider",
|
| 177 |
+
"type": "text",
|
| 178 |
+
"value": settings["chat_model_name"],
|
| 179 |
+
}
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
+
chat_model_fields.append(
|
| 183 |
+
{
|
| 184 |
+
"id": "chat_model_api_base",
|
| 185 |
+
"title": "Chat model API base URL",
|
| 186 |
+
"description": "API base URL for main chat model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.",
|
| 187 |
+
"type": "text",
|
| 188 |
+
"value": settings["chat_model_api_base"],
|
| 189 |
+
}
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
chat_model_fields.append(
|
| 193 |
+
{
|
| 194 |
+
"id": "chat_model_ctx_length",
|
| 195 |
+
"title": "Chat model context length",
|
| 196 |
+
"description": "Maximum number of tokens in the context window for LLM. System prompt, chat history, RAG and response all count towards this limit.",
|
| 197 |
+
"type": "number",
|
| 198 |
+
"value": settings["chat_model_ctx_length"],
|
| 199 |
+
}
|
| 200 |
+
)
|
| 201 |
+
|
| 202 |
+
chat_model_fields.append(
|
| 203 |
+
{
|
| 204 |
+
"id": "chat_model_ctx_history",
|
| 205 |
+
"title": "Context window space for chat history",
|
| 206 |
+
"description": "Portion of context window dedicated to chat history visible to the agent. Chat history will automatically be optimized to fit. Smaller size will result in shorter and more summarized history. The remaining space will be used for system prompt, RAG and response.",
|
| 207 |
+
"type": "range",
|
| 208 |
+
"min": 0.01,
|
| 209 |
+
"max": 1,
|
| 210 |
+
"step": 0.01,
|
| 211 |
+
"value": settings["chat_model_ctx_history"],
|
| 212 |
+
}
|
| 213 |
+
)
|
| 214 |
+
|
| 215 |
+
chat_model_fields.append(
|
| 216 |
+
{
|
| 217 |
+
"id": "chat_model_vision",
|
| 218 |
+
"title": "Supports Vision",
|
| 219 |
+
"description": "Models capable of Vision can for example natively see the content of image attachments.",
|
| 220 |
+
"type": "switch",
|
| 221 |
+
"value": settings["chat_model_vision"],
|
| 222 |
+
}
|
| 223 |
+
)
|
| 224 |
+
|
| 225 |
+
chat_model_fields.append(
|
| 226 |
+
{
|
| 227 |
+
"id": "chat_model_rl_requests",
|
| 228 |
+
"title": "Requests per minute limit",
|
| 229 |
+
"description": "Limits the number of requests per minute to the chat model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 230 |
+
"type": "number",
|
| 231 |
+
"value": settings["chat_model_rl_requests"],
|
| 232 |
+
}
|
| 233 |
+
)
|
| 234 |
+
|
| 235 |
+
chat_model_fields.append(
|
| 236 |
+
{
|
| 237 |
+
"id": "chat_model_rl_input",
|
| 238 |
+
"title": "Input tokens per minute limit",
|
| 239 |
+
"description": "Limits the number of input tokens per minute to the chat model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 240 |
+
"type": "number",
|
| 241 |
+
"value": settings["chat_model_rl_input"],
|
| 242 |
+
}
|
| 243 |
+
)
|
| 244 |
+
|
| 245 |
+
chat_model_fields.append(
|
| 246 |
+
{
|
| 247 |
+
"id": "chat_model_rl_output",
|
| 248 |
+
"title": "Output tokens per minute limit",
|
| 249 |
+
"description": "Limits the number of output tokens per minute to the chat model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 250 |
+
"type": "number",
|
| 251 |
+
"value": settings["chat_model_rl_output"],
|
| 252 |
+
}
|
| 253 |
+
)
|
| 254 |
+
|
| 255 |
+
chat_model_fields.append(
|
| 256 |
+
{
|
| 257 |
+
"id": "ollama_base_url",
|
| 258 |
+
"title": "Ollama Base URL",
|
| 259 |
+
"description": "The base URL for the Ollama API.",
|
| 260 |
+
"type": "html",
|
| 261 |
+
"value": "<code>http://localhost:11434</code>",
|
| 262 |
+
}
|
| 263 |
+
)
|
| 264 |
+
|
| 265 |
+
chat_model_fields.append(
|
| 266 |
+
{
|
| 267 |
+
"id": "chat_model_kwargs",
|
| 268 |
+
"title": "Chat model additional parameters",
|
| 269 |
+
"description": "Any other parameters supported by <a href='https://docs.litellm.ai/docs/set_keys' target='_blank'>LiteLLM</a>. Format is KEY=VALUE on individual lines, just like .env file.",
|
| 270 |
+
"type": "textarea",
|
| 271 |
+
"value": _dict_to_env(settings["chat_model_kwargs"]),
|
| 272 |
+
}
|
| 273 |
+
)
|
| 274 |
+
|
| 275 |
+
chat_model_section: SettingsSection = {
|
| 276 |
+
"id": "chat_model",
|
| 277 |
+
"title": "Chat Model",
|
| 278 |
+
"description": "Selection and settings for main chat model used by Agent Zero",
|
| 279 |
+
"fields": chat_model_fields,
|
| 280 |
+
"tab": "agent",
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
# main model section
|
| 284 |
+
util_model_fields: list[SettingsField] = []
|
| 285 |
+
util_model_fields.append(
|
| 286 |
+
{
|
| 287 |
+
"id": "util_model_provider",
|
| 288 |
+
"title": "Utility model provider",
|
| 289 |
+
"description": "Select provider for utility model used by the framework",
|
| 290 |
+
"type": "select",
|
| 291 |
+
"value": settings["util_model_provider"],
|
| 292 |
+
"options": cast(list[FieldOption], get_providers("chat")),
|
| 293 |
+
}
|
| 294 |
+
)
|
| 295 |
+
util_model_fields.append(
|
| 296 |
+
{
|
| 297 |
+
"id": "util_model_name",
|
| 298 |
+
"title": "Utility model name",
|
| 299 |
+
"description": "Exact name of model from selected provider",
|
| 300 |
+
"type": "text",
|
| 301 |
+
"value": settings["util_model_name"],
|
| 302 |
+
}
|
| 303 |
+
)
|
| 304 |
+
|
| 305 |
+
util_model_fields.append(
|
| 306 |
+
{
|
| 307 |
+
"id": "util_model_api_base",
|
| 308 |
+
"title": "Utility model API base URL",
|
| 309 |
+
"description": "API base URL for utility model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.",
|
| 310 |
+
"type": "text",
|
| 311 |
+
"value": settings["util_model_api_base"],
|
| 312 |
+
}
|
| 313 |
+
)
|
| 314 |
+
|
| 315 |
+
util_model_fields.append(
|
| 316 |
+
{
|
| 317 |
+
"id": "util_model_rl_requests",
|
| 318 |
+
"title": "Requests per minute limit",
|
| 319 |
+
"description": "Limits the number of requests per minute to the utility model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 320 |
+
"type": "number",
|
| 321 |
+
"value": settings["util_model_rl_requests"],
|
| 322 |
+
}
|
| 323 |
+
)
|
| 324 |
+
|
| 325 |
+
util_model_fields.append(
|
| 326 |
+
{
|
| 327 |
+
"id": "util_model_rl_input",
|
| 328 |
+
"title": "Input tokens per minute limit",
|
| 329 |
+
"description": "Limits the number of input tokens per minute to the utility model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 330 |
+
"type": "number",
|
| 331 |
+
"value": settings["util_model_rl_input"],
|
| 332 |
+
}
|
| 333 |
+
)
|
| 334 |
+
|
| 335 |
+
util_model_fields.append(
|
| 336 |
+
{
|
| 337 |
+
"id": "util_model_rl_output",
|
| 338 |
+
"title": "Output tokens per minute limit",
|
| 339 |
+
"description": "Limits the number of output tokens per minute to the utility model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 340 |
+
"type": "number",
|
| 341 |
+
"value": settings["util_model_rl_output"],
|
| 342 |
+
}
|
| 343 |
+
)
|
| 344 |
+
|
| 345 |
+
util_model_fields.append(
|
| 346 |
+
{
|
| 347 |
+
"id": "util_model_kwargs",
|
| 348 |
+
"title": "Utility model additional parameters",
|
| 349 |
+
"description": "Any other parameters supported by <a href='https://docs.litellm.ai/docs/set_keys' target='_blank'>LiteLLM</a>. Format is KEY=VALUE on individual lines, just like .env file.",
|
| 350 |
+
"type": "textarea",
|
| 351 |
+
"value": _dict_to_env(settings["util_model_kwargs"]),
|
| 352 |
+
}
|
| 353 |
+
)
|
| 354 |
+
|
| 355 |
+
util_model_section: SettingsSection = {
|
| 356 |
+
"id": "util_model",
|
| 357 |
+
"title": "Utility model",
|
| 358 |
+
"description": "Smaller, cheaper, faster model for handling utility tasks like organizing memory, preparing prompts, summarizing.",
|
| 359 |
+
"fields": util_model_fields,
|
| 360 |
+
"tab": "agent",
|
| 361 |
+
}
|
| 362 |
+
|
| 363 |
+
# embedding model section
|
| 364 |
+
embed_model_fields: list[SettingsField] = []
|
| 365 |
+
embed_model_fields.append(
|
| 366 |
+
{
|
| 367 |
+
"id": "embed_model_provider",
|
| 368 |
+
"title": "Embedding model provider",
|
| 369 |
+
"description": "Select provider for embedding model used by the framework",
|
| 370 |
+
"type": "select",
|
| 371 |
+
"value": settings["embed_model_provider"],
|
| 372 |
+
"options": cast(list[FieldOption], get_providers("embedding")),
|
| 373 |
+
}
|
| 374 |
+
)
|
| 375 |
+
embed_model_fields.append(
|
| 376 |
+
{
|
| 377 |
+
"id": "embed_model_name",
|
| 378 |
+
"title": "Embedding model name",
|
| 379 |
+
"description": "Exact name of model from selected provider",
|
| 380 |
+
"type": "text",
|
| 381 |
+
"value": settings["embed_model_name"],
|
| 382 |
+
}
|
| 383 |
+
)
|
| 384 |
+
|
| 385 |
+
embed_model_fields.append(
|
| 386 |
+
{
|
| 387 |
+
"id": "embed_model_api_base",
|
| 388 |
+
"title": "Embedding model API base URL",
|
| 389 |
+
"description": "API base URL for embedding model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.",
|
| 390 |
+
"type": "text",
|
| 391 |
+
"value": settings["embed_model_api_base"],
|
| 392 |
+
}
|
| 393 |
+
)
|
| 394 |
+
|
| 395 |
+
embed_model_fields.append(
|
| 396 |
+
{
|
| 397 |
+
"id": "embed_model_rl_requests",
|
| 398 |
+
"title": "Requests per minute limit",
|
| 399 |
+
"description": "Limits the number of requests per minute to the embedding model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 400 |
+
"type": "number",
|
| 401 |
+
"value": settings["embed_model_rl_requests"],
|
| 402 |
+
}
|
| 403 |
+
)
|
| 404 |
+
|
| 405 |
+
embed_model_fields.append(
|
| 406 |
+
{
|
| 407 |
+
"id": "embed_model_rl_input",
|
| 408 |
+
"title": "Input tokens per minute limit",
|
| 409 |
+
"description": "Limits the number of input tokens per minute to the embedding model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 410 |
+
"type": "number",
|
| 411 |
+
"value": settings["embed_model_rl_input"],
|
| 412 |
+
}
|
| 413 |
+
)
|
| 414 |
+
|
| 415 |
+
embed_model_fields.append(
|
| 416 |
+
{
|
| 417 |
+
"id": "embed_model_kwargs",
|
| 418 |
+
"title": "Embedding model additional parameters",
|
| 419 |
+
"description": "Any other parameters supported by <a href='https://docs.litellm.ai/docs/set_keys' target='_blank'>LiteLLM</a>. Format is KEY=VALUE on individual lines, just like .env file.",
|
| 420 |
+
"type": "textarea",
|
| 421 |
+
"value": _dict_to_env(settings["embed_model_kwargs"]),
|
| 422 |
+
}
|
| 423 |
+
)
|
| 424 |
+
|
| 425 |
+
embed_model_section: SettingsSection = {
|
| 426 |
+
"id": "embed_model",
|
| 427 |
+
"title": "Embedding Model",
|
| 428 |
+
"description": f"Settings for the embedding model used by Agent Zero.<br><h4>⚠️ No need to change</h4>The default HuggingFace model {default_settings['embed_model_name']} is preloaded and runs locally within the docker container and there's no need to change it unless you have a specific requirements for embedding.",
|
| 429 |
+
"fields": embed_model_fields,
|
| 430 |
+
"tab": "agent",
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
# embedding model section
|
| 434 |
+
browser_model_fields: list[SettingsField] = []
|
| 435 |
+
browser_model_fields.append(
|
| 436 |
+
{
|
| 437 |
+
"id": "browser_model_provider",
|
| 438 |
+
"title": "Web Browser model provider",
|
| 439 |
+
"description": "Select provider for web browser model used by <a href='https://github.com/browser-use/browser-use' target='_blank'>browser-use</a> framework",
|
| 440 |
+
"type": "select",
|
| 441 |
+
"value": settings["browser_model_provider"],
|
| 442 |
+
"options": cast(list[FieldOption], get_providers("chat")),
|
| 443 |
+
}
|
| 444 |
+
)
|
| 445 |
+
browser_model_fields.append(
|
| 446 |
+
{
|
| 447 |
+
"id": "browser_model_name",
|
| 448 |
+
"title": "Web Browser model name",
|
| 449 |
+
"description": "Exact name of model from selected provider",
|
| 450 |
+
"type": "text",
|
| 451 |
+
"value": settings["browser_model_name"],
|
| 452 |
+
}
|
| 453 |
+
)
|
| 454 |
+
|
| 455 |
+
browser_model_fields.append(
|
| 456 |
+
{
|
| 457 |
+
"id": "browser_model_api_base",
|
| 458 |
+
"title": "Web Browser model API base URL",
|
| 459 |
+
"description": "API base URL for web browser model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.",
|
| 460 |
+
"type": "text",
|
| 461 |
+
"value": settings["browser_model_api_base"],
|
| 462 |
+
}
|
| 463 |
+
)
|
| 464 |
+
|
| 465 |
+
browser_model_fields.append(
|
| 466 |
+
{
|
| 467 |
+
"id": "browser_model_vision",
|
| 468 |
+
"title": "Use Vision",
|
| 469 |
+
"description": "Models capable of Vision can use it to analyze web pages from screenshots. Increases quality but also token usage.",
|
| 470 |
+
"type": "switch",
|
| 471 |
+
"value": settings["browser_model_vision"],
|
| 472 |
+
}
|
| 473 |
+
)
|
| 474 |
+
|
| 475 |
+
browser_model_fields.append(
|
| 476 |
+
{
|
| 477 |
+
"id": "browser_model_rl_requests",
|
| 478 |
+
"title": "Web Browser model rate limit requests",
|
| 479 |
+
"description": "Rate limit requests for web browser model.",
|
| 480 |
+
"type": "number",
|
| 481 |
+
"value": settings["browser_model_rl_requests"],
|
| 482 |
+
}
|
| 483 |
+
)
|
| 484 |
+
|
| 485 |
+
browser_model_fields.append(
|
| 486 |
+
{
|
| 487 |
+
"id": "browser_model_rl_input",
|
| 488 |
+
"title": "Web Browser model rate limit input",
|
| 489 |
+
"description": "Rate limit input for web browser model.",
|
| 490 |
+
"type": "number",
|
| 491 |
+
"value": settings["browser_model_rl_input"],
|
| 492 |
+
}
|
| 493 |
+
)
|
| 494 |
+
|
| 495 |
+
browser_model_fields.append(
|
| 496 |
+
{
|
| 497 |
+
"id": "browser_model_rl_output",
|
| 498 |
+
"title": "Web Browser model rate limit output",
|
| 499 |
+
"description": "Rate limit output for web browser model.",
|
| 500 |
+
"type": "number",
|
| 501 |
+
"value": settings["browser_model_rl_output"],
|
| 502 |
+
}
|
| 503 |
+
)
|
| 504 |
+
|
| 505 |
+
browser_model_fields.append(
|
| 506 |
+
{
|
| 507 |
+
"id": "browser_model_kwargs",
|
| 508 |
+
"title": "Web Browser model additional parameters",
|
| 509 |
+
"description": "Any other parameters supported by <a href='https://docs.litellm.ai/docs/set_keys' target='_blank'>LiteLLM</a>. Format is KEY=VALUE on individual lines, just like .env file.",
|
| 510 |
+
"type": "textarea",
|
| 511 |
+
"value": _dict_to_env(settings["browser_model_kwargs"]),
|
| 512 |
+
}
|
| 513 |
+
)
|
| 514 |
+
|
| 515 |
+
browser_model_section: SettingsSection = {
|
| 516 |
+
"id": "browser_model",
|
| 517 |
+
"title": "Web Browser Model",
|
| 518 |
+
"description": "Settings for the web browser model. Agent Zero uses <a href='https://github.com/browser-use/browser-use' target='_blank'>browser-use</a> agentic framework to handle web interactions.",
|
| 519 |
+
"fields": browser_model_fields,
|
| 520 |
+
"tab": "agent",
|
| 521 |
+
}
|
| 522 |
+
|
| 523 |
+
# basic auth section
|
| 524 |
+
auth_fields: list[SettingsField] = []
|
| 525 |
+
|
| 526 |
+
auth_fields.append(
|
| 527 |
+
{
|
| 528 |
+
"id": "auth_login",
|
| 529 |
+
"title": "UI Login",
|
| 530 |
+
"description": "Set user name for web UI",
|
| 531 |
+
"type": "text",
|
| 532 |
+
"value": dotenv.get_dotenv_value(dotenv.KEY_AUTH_LOGIN) or "",
|
| 533 |
+
}
|
| 534 |
+
)
|
| 535 |
+
|
| 536 |
+
auth_fields.append(
|
| 537 |
+
{
|
| 538 |
+
"id": "auth_password",
|
| 539 |
+
"title": "UI Password",
|
| 540 |
+
"description": "Set user password for web UI",
|
| 541 |
+
"type": "password",
|
| 542 |
+
"value": (
|
| 543 |
+
PASSWORD_PLACEHOLDER
|
| 544 |
+
if dotenv.get_dotenv_value(dotenv.KEY_AUTH_PASSWORD)
|
| 545 |
+
else ""
|
| 546 |
+
),
|
| 547 |
+
}
|
| 548 |
+
)
|
| 549 |
+
|
| 550 |
+
if runtime.is_dockerized():
|
| 551 |
+
auth_fields.append(
|
| 552 |
+
{
|
| 553 |
+
"id": "root_password",
|
| 554 |
+
"title": "root Password",
|
| 555 |
+
"description": "Change linux root password in docker container. This password can be used for SSH access. Original password was randomly generated during setup.",
|
| 556 |
+
"type": "password",
|
| 557 |
+
"value": "",
|
| 558 |
+
}
|
| 559 |
+
)
|
| 560 |
+
|
| 561 |
+
auth_section: SettingsSection = {
|
| 562 |
+
"id": "auth",
|
| 563 |
+
"title": "Authentication",
|
| 564 |
+
"description": "Settings for authentication to use Agent Zero Web UI.",
|
| 565 |
+
"fields": auth_fields,
|
| 566 |
+
"tab": "external",
|
| 567 |
+
}
|
| 568 |
+
|
| 569 |
+
# api keys model section
|
| 570 |
+
api_keys_fields: list[SettingsField] = []
|
| 571 |
+
|
| 572 |
+
# Collect unique providers from both chat and embedding sections
|
| 573 |
+
providers_seen: set[str] = set()
|
| 574 |
+
for p_type in ("chat", "embedding"):
|
| 575 |
+
for provider in get_providers(p_type):
|
| 576 |
+
pid_lower = provider["value"].lower()
|
| 577 |
+
if pid_lower in providers_seen:
|
| 578 |
+
continue
|
| 579 |
+
providers_seen.add(pid_lower)
|
| 580 |
+
api_keys_fields.append(
|
| 581 |
+
_get_api_key_field(settings, pid_lower, provider["label"])
|
| 582 |
+
)
|
| 583 |
+
|
| 584 |
+
api_keys_section: SettingsSection = {
|
| 585 |
+
"id": "api_keys",
|
| 586 |
+
"title": "API Keys",
|
| 587 |
+
"description": "API keys for model providers and services used by Agent Zero. You can set multiple API keys separated by a comma (,). They will be used in round-robin fashion.",
|
| 588 |
+
"fields": api_keys_fields,
|
| 589 |
+
"tab": "external",
|
| 590 |
+
}
|
| 591 |
+
|
| 592 |
+
# Agent config section
|
| 593 |
+
agent_fields: list[SettingsField] = []
|
| 594 |
+
|
| 595 |
+
agent_fields.append(
|
| 596 |
+
{
|
| 597 |
+
"id": "agent_profile",
|
| 598 |
+
"title": "Default agent profile",
|
| 599 |
+
"description": "Subdirectory of /agents folder to be used by default agent no. 0. Subordinate agents can be spawned with other profiles, that is on their superior agent to decide. This setting affects the behaviour of the top level agent you communicate with.",
|
| 600 |
+
"type": "select",
|
| 601 |
+
"value": settings["agent_profile"],
|
| 602 |
+
"options": [
|
| 603 |
+
{"value": subdir, "label": subdir}
|
| 604 |
+
for subdir in files.get_subdirectories("agents")
|
| 605 |
+
if subdir != "_example"
|
| 606 |
+
],
|
| 607 |
+
}
|
| 608 |
+
)
|
| 609 |
+
|
| 610 |
+
agent_fields.append(
|
| 611 |
+
{
|
| 612 |
+
"id": "agent_knowledge_subdir",
|
| 613 |
+
"title": "Knowledge subdirectory",
|
| 614 |
+
"description": "Subdirectory of /knowledge folder to use for agent knowledge import. 'default' subfolder is always imported and contains framework knowledge.",
|
| 615 |
+
"type": "select",
|
| 616 |
+
"value": settings["agent_knowledge_subdir"],
|
| 617 |
+
"options": [
|
| 618 |
+
{"value": subdir, "label": subdir}
|
| 619 |
+
for subdir in files.get_subdirectories("knowledge", exclude="default")
|
| 620 |
+
],
|
| 621 |
+
}
|
| 622 |
+
)
|
| 623 |
+
|
| 624 |
+
agent_section: SettingsSection = {
|
| 625 |
+
"id": "agent",
|
| 626 |
+
"title": "Agent Config",
|
| 627 |
+
"description": "Agent parameters.",
|
| 628 |
+
"fields": agent_fields,
|
| 629 |
+
"tab": "agent",
|
| 630 |
+
}
|
| 631 |
+
|
| 632 |
+
memory_fields: list[SettingsField] = []
|
| 633 |
+
|
| 634 |
+
memory_fields.append(
|
| 635 |
+
{
|
| 636 |
+
"id": "agent_memory_subdir",
|
| 637 |
+
"title": "Memory Subdirectory",
|
| 638 |
+
"description": "Subdirectory of /memory folder to use for agent memory storage. Used to separate memory storage between different instances.",
|
| 639 |
+
"type": "text",
|
| 640 |
+
"value": settings["agent_memory_subdir"],
|
| 641 |
+
# "options": [
|
| 642 |
+
# {"value": subdir, "label": subdir}
|
| 643 |
+
# for subdir in files.get_subdirectories("memory", exclude="embeddings")
|
| 644 |
+
# ],
|
| 645 |
+
}
|
| 646 |
+
)
|
| 647 |
+
|
| 648 |
+
memory_fields.append(
|
| 649 |
+
{
|
| 650 |
+
"id": "memory_recall_enabled",
|
| 651 |
+
"title": "Memory auto-recall enabled",
|
| 652 |
+
"description": "Agent Zero will automatically recall memories based on convesation context.",
|
| 653 |
+
"type": "switch",
|
| 654 |
+
"value": settings["memory_recall_enabled"],
|
| 655 |
+
}
|
| 656 |
+
)
|
| 657 |
+
|
| 658 |
+
memory_fields.append(
|
| 659 |
+
{
|
| 660 |
+
"id": "memory_recall_delayed",
|
| 661 |
+
"title": "Memory auto-recall delayed",
|
| 662 |
+
"description": "The agent will not wait for auto memory recall. Memories will be delivered one message later. This speeds up agent's response time but may result in less relevant first step.",
|
| 663 |
+
"type": "switch",
|
| 664 |
+
"value": settings["memory_recall_delayed"],
|
| 665 |
+
}
|
| 666 |
+
)
|
| 667 |
+
|
| 668 |
+
memory_fields.append(
|
| 669 |
+
{
|
| 670 |
+
"id": "memory_recall_query_prep",
|
| 671 |
+
"title": "Auto-recall AI query preparation",
|
| 672 |
+
"description": "Enables vector DB query preparation from conversation context by utility LLM for auto-recall. Improves search quality, adds 1 utility LLM call per auto-recall.",
|
| 673 |
+
"type": "switch",
|
| 674 |
+
"value": settings["memory_recall_query_prep"],
|
| 675 |
+
}
|
| 676 |
+
)
|
| 677 |
+
|
| 678 |
+
memory_fields.append(
|
| 679 |
+
{
|
| 680 |
+
"id": "memory_recall_post_filter",
|
| 681 |
+
"title": "Auto-recall AI post-filtering",
|
| 682 |
+
"description": "Enables memory relevance filtering by utility LLM for auto-recall. Improves search quality, adds 1 utility LLM call per auto-recall.",
|
| 683 |
+
"type": "switch",
|
| 684 |
+
"value": settings["memory_recall_post_filter"],
|
| 685 |
+
}
|
| 686 |
+
)
|
| 687 |
+
|
| 688 |
+
memory_fields.append(
|
| 689 |
+
{
|
| 690 |
+
"id": "memory_recall_interval",
|
| 691 |
+
"title": "Memory auto-recall interval",
|
| 692 |
+
"description": "Memories are recalled after every user or superior agent message. During agent's monologue, memories are recalled every X turns based on this parameter.",
|
| 693 |
+
"type": "range",
|
| 694 |
+
"min": 1,
|
| 695 |
+
"max": 10,
|
| 696 |
+
"step": 1,
|
| 697 |
+
"value": settings["memory_recall_interval"],
|
| 698 |
+
}
|
| 699 |
+
)
|
| 700 |
+
|
| 701 |
+
memory_fields.append(
|
| 702 |
+
{
|
| 703 |
+
"id": "memory_recall_history_len",
|
| 704 |
+
"title": "Memory auto-recall history length",
|
| 705 |
+
"description": "The length of conversation history passed to memory recall LLM for context (in characters).",
|
| 706 |
+
"type": "number",
|
| 707 |
+
"value": settings["memory_recall_history_len"],
|
| 708 |
+
}
|
| 709 |
+
)
|
| 710 |
+
|
| 711 |
+
memory_fields.append(
|
| 712 |
+
{
|
| 713 |
+
"id": "memory_recall_similarity_threshold",
|
| 714 |
+
"title": "Memory auto-recall similarity threshold",
|
| 715 |
+
"description": "The threshold for similarity search in memory recall (0 = no similarity, 1 = exact match).",
|
| 716 |
+
"type": "range",
|
| 717 |
+
"min": 0,
|
| 718 |
+
"max": 1,
|
| 719 |
+
"step": 0.01,
|
| 720 |
+
"value": settings["memory_recall_similarity_threshold"],
|
| 721 |
+
}
|
| 722 |
+
)
|
| 723 |
+
|
| 724 |
+
memory_fields.append(
|
| 725 |
+
{
|
| 726 |
+
"id": "memory_recall_memories_max_search",
|
| 727 |
+
"title": "Memory auto-recall max memories to search",
|
| 728 |
+
"description": "The maximum number of memories returned by vector DB for further processing.",
|
| 729 |
+
"type": "number",
|
| 730 |
+
"value": settings["memory_recall_memories_max_search"],
|
| 731 |
+
}
|
| 732 |
+
)
|
| 733 |
+
|
| 734 |
+
memory_fields.append(
|
| 735 |
+
{
|
| 736 |
+
"id": "memory_recall_memories_max_result",
|
| 737 |
+
"title": "Memory auto-recall max memories to use",
|
| 738 |
+
"description": "The maximum number of memories to inject into A0's context window.",
|
| 739 |
+
"type": "number",
|
| 740 |
+
"value": settings["memory_recall_memories_max_result"],
|
| 741 |
+
}
|
| 742 |
+
)
|
| 743 |
+
|
| 744 |
+
memory_fields.append(
|
| 745 |
+
{
|
| 746 |
+
"id": "memory_recall_solutions_max_search",
|
| 747 |
+
"title": "Memory auto-recall max solutions to search",
|
| 748 |
+
"description": "The maximum number of solutions returned by vector DB for further processing.",
|
| 749 |
+
"type": "number",
|
| 750 |
+
"value": settings["memory_recall_solutions_max_search"],
|
| 751 |
+
}
|
| 752 |
+
)
|
| 753 |
+
|
| 754 |
+
memory_fields.append(
|
| 755 |
+
{
|
| 756 |
+
"id": "memory_recall_solutions_max_result",
|
| 757 |
+
"title": "Memory auto-recall max solutions to use",
|
| 758 |
+
"description": "The maximum number of solutions to inject into A0's context window.",
|
| 759 |
+
"type": "number",
|
| 760 |
+
"value": settings["memory_recall_solutions_max_result"],
|
| 761 |
+
}
|
| 762 |
+
)
|
| 763 |
+
|
| 764 |
+
memory_fields.append(
|
| 765 |
+
{
|
| 766 |
+
"id": "memory_memorize_enabled",
|
| 767 |
+
"title": "Auto-memorize enabled",
|
| 768 |
+
"description": "A0 will automatically memorize facts and solutions from conversation history.",
|
| 769 |
+
"type": "switch",
|
| 770 |
+
"value": settings["memory_memorize_enabled"],
|
| 771 |
+
}
|
| 772 |
+
)
|
| 773 |
+
|
| 774 |
+
memory_fields.append(
|
| 775 |
+
{
|
| 776 |
+
"id": "memory_memorize_consolidation",
|
| 777 |
+
"title": "Auto-memorize AI consolidation",
|
| 778 |
+
"description": "A0 will automatically consolidate similar memories using utility LLM. Improves memory quality over time, adds 2 utility LLM calls per memory.",
|
| 779 |
+
"type": "switch",
|
| 780 |
+
"value": settings["memory_memorize_consolidation"],
|
| 781 |
+
}
|
| 782 |
+
)
|
| 783 |
+
|
| 784 |
+
memory_fields.append(
|
| 785 |
+
{
|
| 786 |
+
"id": "memory_memorize_replace_threshold",
|
| 787 |
+
"title": "Auto-memorize replacement threshold",
|
| 788 |
+
"description": "Only applies when AI consolidation is disabled. Replaces previous similar memories with new ones based on this threshold. 0 = replace even if not similar at all, 1 = replace only if exact match.",
|
| 789 |
+
"type": "range",
|
| 790 |
+
"min": 0,
|
| 791 |
+
"max": 1,
|
| 792 |
+
"step": 0.01,
|
| 793 |
+
"value": settings["memory_memorize_replace_threshold"],
|
| 794 |
+
}
|
| 795 |
+
)
|
| 796 |
+
|
| 797 |
+
memory_section: SettingsSection = {
|
| 798 |
+
"id": "memory",
|
| 799 |
+
"title": "Memory",
|
| 800 |
+
"description": "Configuration of A0's memory system. A0 memorizes and recalls memories automatically to help it's context awareness.",
|
| 801 |
+
"fields": memory_fields,
|
| 802 |
+
"tab": "agent",
|
| 803 |
+
}
|
| 804 |
+
|
| 805 |
+
dev_fields: list[SettingsField] = []
|
| 806 |
+
|
| 807 |
+
dev_fields.append(
|
| 808 |
+
{
|
| 809 |
+
"id": "shell_interface",
|
| 810 |
+
"title": "Shell Interface",
|
| 811 |
+
"description": "Terminal interface used for Code Execution Tool. Local Python TTY works locally in both dockerized and development environments. SSH always connects to dockerized environment (automatically at localhost or RFC host address).",
|
| 812 |
+
"type": "select",
|
| 813 |
+
"value": settings["shell_interface"],
|
| 814 |
+
"options": [{"value": "local", "label": "Local Python TTY"}, {"value": "ssh", "label": "SSH"}],
|
| 815 |
+
}
|
| 816 |
+
)
|
| 817 |
+
|
| 818 |
+
if runtime.is_development():
|
| 819 |
+
# dev_fields.append(
|
| 820 |
+
# {
|
| 821 |
+
# "id": "rfc_auto_docker",
|
| 822 |
+
# "title": "RFC Auto Docker Management",
|
| 823 |
+
# "description": "Automatically create dockerized instance of A0 for RFCs using this instance's code base and, settings and .env.",
|
| 824 |
+
# "type": "text",
|
| 825 |
+
# "value": settings["rfc_auto_docker"],
|
| 826 |
+
# }
|
| 827 |
+
# )
|
| 828 |
+
|
| 829 |
+
dev_fields.append(
|
| 830 |
+
{
|
| 831 |
+
"id": "rfc_url",
|
| 832 |
+
"title": "RFC Destination URL",
|
| 833 |
+
"description": "URL of dockerized A0 instance for remote function calls. Do not specify port here.",
|
| 834 |
+
"type": "text",
|
| 835 |
+
"value": settings["rfc_url"],
|
| 836 |
+
}
|
| 837 |
+
)
|
| 838 |
+
|
| 839 |
+
dev_fields.append(
|
| 840 |
+
{
|
| 841 |
+
"id": "rfc_password",
|
| 842 |
+
"title": "RFC Password",
|
| 843 |
+
"description": "Password for remote function calls. Passwords must match on both instances. RFCs can not be used with empty password.",
|
| 844 |
+
"type": "password",
|
| 845 |
+
"value": (
|
| 846 |
+
PASSWORD_PLACEHOLDER
|
| 847 |
+
if dotenv.get_dotenv_value(dotenv.KEY_RFC_PASSWORD)
|
| 848 |
+
else ""
|
| 849 |
+
),
|
| 850 |
+
}
|
| 851 |
+
)
|
| 852 |
+
|
| 853 |
+
if runtime.is_development():
|
| 854 |
+
dev_fields.append(
|
| 855 |
+
{
|
| 856 |
+
"id": "rfc_port_http",
|
| 857 |
+
"title": "RFC HTTP port",
|
| 858 |
+
"description": "HTTP port for dockerized instance of A0.",
|
| 859 |
+
"type": "text",
|
| 860 |
+
"value": settings["rfc_port_http"],
|
| 861 |
+
}
|
| 862 |
+
)
|
| 863 |
+
|
| 864 |
+
dev_fields.append(
|
| 865 |
+
{
|
| 866 |
+
"id": "rfc_port_ssh",
|
| 867 |
+
"title": "RFC SSH port",
|
| 868 |
+
"description": "SSH port for dockerized instance of A0.",
|
| 869 |
+
"type": "text",
|
| 870 |
+
"value": settings["rfc_port_ssh"],
|
| 871 |
+
}
|
| 872 |
+
)
|
| 873 |
+
|
| 874 |
+
dev_section: SettingsSection = {
|
| 875 |
+
"id": "dev",
|
| 876 |
+
"title": "Development",
|
| 877 |
+
"description": "Parameters for A0 framework development. RFCs (remote function calls) are used to call functions on another A0 instance. You can develop and debug A0 natively on your local system while redirecting some functions to A0 instance in docker. This is crucial for development as A0 needs to run in standardized environment to support all features.",
|
| 878 |
+
"fields": dev_fields,
|
| 879 |
+
"tab": "developer",
|
| 880 |
+
}
|
| 881 |
+
|
| 882 |
+
# code_exec_fields: list[SettingsField] = []
|
| 883 |
+
|
| 884 |
+
# code_exec_fields.append(
|
| 885 |
+
# {
|
| 886 |
+
# "id": "code_exec_ssh_enabled",
|
| 887 |
+
# "title": "Use SSH for code execution",
|
| 888 |
+
# "description": "Code execution will use SSH to connect to the terminal. When disabled, a local python terminal interface is used instead. SSH should only be used in development environment or when encountering issues with the local python terminal interface.",
|
| 889 |
+
# "type": "switch",
|
| 890 |
+
# "value": settings["code_exec_ssh_enabled"],
|
| 891 |
+
# }
|
| 892 |
+
# )
|
| 893 |
+
|
| 894 |
+
# code_exec_fields.append(
|
| 895 |
+
# {
|
| 896 |
+
# "id": "code_exec_ssh_addr",
|
| 897 |
+
# "title": "Code execution SSH address",
|
| 898 |
+
# "description": "Address of the SSH server for code execution. Only applies when SSH is enabled.",
|
| 899 |
+
# "type": "text",
|
| 900 |
+
# "value": settings["code_exec_ssh_addr"],
|
| 901 |
+
# }
|
| 902 |
+
# )
|
| 903 |
+
|
| 904 |
+
# code_exec_fields.append(
|
| 905 |
+
# {
|
| 906 |
+
# "id": "code_exec_ssh_port",
|
| 907 |
+
# "title": "Code execution SSH port",
|
| 908 |
+
# "description": "Port of the SSH server for code execution. Only applies when SSH is enabled.",
|
| 909 |
+
# "type": "text",
|
| 910 |
+
# "value": settings["code_exec_ssh_port"],
|
| 911 |
+
# }
|
| 912 |
+
# )
|
| 913 |
+
|
| 914 |
+
# code_exec_section: SettingsSection = {
|
| 915 |
+
# "id": "code_exec",
|
| 916 |
+
# "title": "Code execution",
|
| 917 |
+
# "description": "Configuration of code execution by the agent.",
|
| 918 |
+
# "fields": code_exec_fields,
|
| 919 |
+
# "tab": "developer",
|
| 920 |
+
# }
|
| 921 |
+
|
| 922 |
+
# Speech to text section
|
| 923 |
+
stt_fields: list[SettingsField] = []
|
| 924 |
+
|
| 925 |
+
stt_fields.append(
|
| 926 |
+
{
|
| 927 |
+
"id": "stt_microphone_section",
|
| 928 |
+
"title": "Microphone device",
|
| 929 |
+
"description": "Select the microphone device to use for speech-to-text.",
|
| 930 |
+
"value": "<x-component path='/settings/speech/microphone.html' />",
|
| 931 |
+
"type": "html",
|
| 932 |
+
}
|
| 933 |
+
)
|
| 934 |
+
|
| 935 |
+
stt_fields.append(
|
| 936 |
+
{
|
| 937 |
+
"id": "stt_model_size",
|
| 938 |
+
"title": "Speech-to-text model size",
|
| 939 |
+
"description": "Select the speech-to-text model size",
|
| 940 |
+
"type": "select",
|
| 941 |
+
"value": settings["stt_model_size"],
|
| 942 |
+
"options": [
|
| 943 |
+
{"value": "tiny", "label": "Tiny (39M, English)"},
|
| 944 |
+
{"value": "base", "label": "Base (74M, English)"},
|
| 945 |
+
{"value": "small", "label": "Small (244M, English)"},
|
| 946 |
+
{"value": "medium", "label": "Medium (769M, English)"},
|
| 947 |
+
{"value": "large", "label": "Large (1.5B, Multilingual)"},
|
| 948 |
+
{"value": "turbo", "label": "Turbo (Multilingual)"},
|
| 949 |
+
],
|
| 950 |
+
}
|
| 951 |
+
)
|
| 952 |
+
|
| 953 |
+
stt_fields.append(
|
| 954 |
+
{
|
| 955 |
+
"id": "stt_language",
|
| 956 |
+
"title": "Speech-to-text language code",
|
| 957 |
+
"description": "Language code (e.g. en, fr, it)",
|
| 958 |
+
"type": "text",
|
| 959 |
+
"value": settings["stt_language"],
|
| 960 |
+
}
|
| 961 |
+
)
|
| 962 |
+
|
| 963 |
+
stt_fields.append(
|
| 964 |
+
{
|
| 965 |
+
"id": "stt_silence_threshold",
|
| 966 |
+
"title": "Microphone silence threshold",
|
| 967 |
+
"description": "Silence detection threshold. Lower values are more sensitive to noise.",
|
| 968 |
+
"type": "range",
|
| 969 |
+
"min": 0,
|
| 970 |
+
"max": 1,
|
| 971 |
+
"step": 0.01,
|
| 972 |
+
"value": settings["stt_silence_threshold"],
|
| 973 |
+
}
|
| 974 |
+
)
|
| 975 |
+
|
| 976 |
+
stt_fields.append(
|
| 977 |
+
{
|
| 978 |
+
"id": "stt_silence_duration",
|
| 979 |
+
"title": "Microphone silence duration (ms)",
|
| 980 |
+
"description": "Duration of silence before the system considers speaking to have ended.",
|
| 981 |
+
"type": "text",
|
| 982 |
+
"value": settings["stt_silence_duration"],
|
| 983 |
+
}
|
| 984 |
+
)
|
| 985 |
+
|
| 986 |
+
stt_fields.append(
|
| 987 |
+
{
|
| 988 |
+
"id": "stt_waiting_timeout",
|
| 989 |
+
"title": "Microphone waiting timeout (ms)",
|
| 990 |
+
"description": "Duration of silence before the system closes the microphone.",
|
| 991 |
+
"type": "text",
|
| 992 |
+
"value": settings["stt_waiting_timeout"],
|
| 993 |
+
}
|
| 994 |
+
)
|
| 995 |
+
|
| 996 |
+
# TTS fields
|
| 997 |
+
tts_fields: list[SettingsField] = []
|
| 998 |
+
|
| 999 |
+
tts_fields.append(
|
| 1000 |
+
{
|
| 1001 |
+
"id": "tts_kokoro",
|
| 1002 |
+
"title": "Enable Kokoro TTS",
|
| 1003 |
+
"description": "Enable higher quality server-side AI (Kokoro) instead of browser-based text-to-speech.",
|
| 1004 |
+
"type": "switch",
|
| 1005 |
+
"value": settings["tts_kokoro"],
|
| 1006 |
+
}
|
| 1007 |
+
)
|
| 1008 |
+
|
| 1009 |
+
speech_section: SettingsSection = {
|
| 1010 |
+
"id": "speech",
|
| 1011 |
+
"title": "Speech",
|
| 1012 |
+
"description": "Voice transcription and speech synthesis settings.",
|
| 1013 |
+
"fields": stt_fields + tts_fields,
|
| 1014 |
+
"tab": "agent",
|
| 1015 |
+
}
|
| 1016 |
+
|
| 1017 |
+
# MCP section
|
| 1018 |
+
mcp_client_fields: list[SettingsField] = []
|
| 1019 |
+
|
| 1020 |
+
mcp_client_fields.append(
|
| 1021 |
+
{
|
| 1022 |
+
"id": "mcp_servers_config",
|
| 1023 |
+
"title": "MCP Servers Configuration",
|
| 1024 |
+
"description": "External MCP servers can be configured here.",
|
| 1025 |
+
"type": "button",
|
| 1026 |
+
"value": "Open",
|
| 1027 |
+
}
|
| 1028 |
+
)
|
| 1029 |
+
|
| 1030 |
+
mcp_client_fields.append(
|
| 1031 |
+
{
|
| 1032 |
+
"id": "mcp_servers",
|
| 1033 |
+
"title": "MCP Servers",
|
| 1034 |
+
"description": "(JSON list of) >> RemoteServer <<: [name, url, headers, timeout (opt), sse_read_timeout (opt), disabled (opt)] / >> Local Server <<: [name, command, args, env, encoding (opt), encoding_error_handler (opt), disabled (opt)]",
|
| 1035 |
+
"type": "textarea",
|
| 1036 |
+
"value": settings["mcp_servers"],
|
| 1037 |
+
"hidden": True,
|
| 1038 |
+
}
|
| 1039 |
+
)
|
| 1040 |
+
|
| 1041 |
+
mcp_client_fields.append(
|
| 1042 |
+
{
|
| 1043 |
+
"id": "mcp_client_init_timeout",
|
| 1044 |
+
"title": "MCP Client Init Timeout",
|
| 1045 |
+
"description": "Timeout for MCP client initialization (in seconds). Higher values might be required for complex MCPs, but might also slowdown system startup.",
|
| 1046 |
+
"type": "number",
|
| 1047 |
+
"value": settings["mcp_client_init_timeout"],
|
| 1048 |
+
}
|
| 1049 |
+
)
|
| 1050 |
+
|
| 1051 |
+
mcp_client_fields.append(
|
| 1052 |
+
{
|
| 1053 |
+
"id": "mcp_client_tool_timeout",
|
| 1054 |
+
"title": "MCP Client Tool Timeout",
|
| 1055 |
+
"description": "Timeout for MCP client tool execution. Higher values might be required for complex tools, but might also result in long responses with failing tools.",
|
| 1056 |
+
"type": "number",
|
| 1057 |
+
"value": settings["mcp_client_tool_timeout"],
|
| 1058 |
+
}
|
| 1059 |
+
)
|
| 1060 |
+
|
| 1061 |
+
mcp_client_section: SettingsSection = {
|
| 1062 |
+
"id": "mcp_client",
|
| 1063 |
+
"title": "External MCP Servers",
|
| 1064 |
+
"description": "Agent Zero can use external MCP servers, local or remote as tools.",
|
| 1065 |
+
"fields": mcp_client_fields,
|
| 1066 |
+
"tab": "mcp",
|
| 1067 |
+
}
|
| 1068 |
+
|
| 1069 |
+
mcp_server_fields: list[SettingsField] = []
|
| 1070 |
+
|
| 1071 |
+
mcp_server_fields.append(
|
| 1072 |
+
{
|
| 1073 |
+
"id": "mcp_server_enabled",
|
| 1074 |
+
"title": "Enable A0 MCP Server",
|
| 1075 |
+
"description": "Expose Agent Zero as an SSE/HTTP MCP server. This will make this A0 instance available to MCP clients.",
|
| 1076 |
+
"type": "switch",
|
| 1077 |
+
"value": settings["mcp_server_enabled"],
|
| 1078 |
+
}
|
| 1079 |
+
)
|
| 1080 |
+
|
| 1081 |
+
mcp_server_fields.append(
|
| 1082 |
+
{
|
| 1083 |
+
"id": "mcp_server_token",
|
| 1084 |
+
"title": "MCP Server Token",
|
| 1085 |
+
"description": "Token for MCP server authentication.",
|
| 1086 |
+
"type": "text",
|
| 1087 |
+
"hidden": True,
|
| 1088 |
+
"value": settings["mcp_server_token"],
|
| 1089 |
+
}
|
| 1090 |
+
)
|
| 1091 |
+
|
| 1092 |
+
mcp_server_section: SettingsSection = {
|
| 1093 |
+
"id": "mcp_server",
|
| 1094 |
+
"title": "A0 MCP Server",
|
| 1095 |
+
"description": "Agent Zero can be exposed as an SSE MCP server. See <a href=\"javascript:openModal('settings/mcp/server/example.html')\">connection example</a>.",
|
| 1096 |
+
"fields": mcp_server_fields,
|
| 1097 |
+
"tab": "mcp",
|
| 1098 |
+
}
|
| 1099 |
+
|
| 1100 |
+
# -------- A2A Section --------
|
| 1101 |
+
a2a_fields: list[SettingsField] = []
|
| 1102 |
+
|
| 1103 |
+
a2a_fields.append(
|
| 1104 |
+
{
|
| 1105 |
+
"id": "a2a_server_enabled",
|
| 1106 |
+
"title": "Enable A2A server",
|
| 1107 |
+
"description": "Expose Agent Zero as A2A server. This allows other agents to connect to A0 via A2A protocol.",
|
| 1108 |
+
"type": "switch",
|
| 1109 |
+
"value": settings["a2a_server_enabled"],
|
| 1110 |
+
}
|
| 1111 |
+
)
|
| 1112 |
+
|
| 1113 |
+
a2a_section: SettingsSection = {
|
| 1114 |
+
"id": "a2a_server",
|
| 1115 |
+
"title": "A0 A2A Server",
|
| 1116 |
+
"description": "Agent Zero can be exposed as an A2A server. See <a href=\"javascript:openModal('settings/a2a/a2a-connection.html')\">connection example</a>.",
|
| 1117 |
+
"fields": a2a_fields,
|
| 1118 |
+
"tab": "mcp",
|
| 1119 |
+
}
|
| 1120 |
+
|
| 1121 |
+
|
| 1122 |
+
# External API section
|
| 1123 |
+
external_api_fields: list[SettingsField] = []
|
| 1124 |
+
|
| 1125 |
+
external_api_fields.append(
|
| 1126 |
+
{
|
| 1127 |
+
"id": "external_api_examples",
|
| 1128 |
+
"title": "API Examples",
|
| 1129 |
+
"description": "View examples for using Agent Zero's external API endpoints with API key authentication.",
|
| 1130 |
+
"type": "button",
|
| 1131 |
+
"value": "Show API Examples",
|
| 1132 |
+
}
|
| 1133 |
+
)
|
| 1134 |
+
|
| 1135 |
+
external_api_section: SettingsSection = {
|
| 1136 |
+
"id": "external_api",
|
| 1137 |
+
"title": "External API",
|
| 1138 |
+
"description": "Agent Zero provides external API endpoints for integration with other applications. "
|
| 1139 |
+
"These endpoints use API key authentication and support text messages and file attachments.",
|
| 1140 |
+
"fields": external_api_fields,
|
| 1141 |
+
"tab": "external",
|
| 1142 |
+
}
|
| 1143 |
+
|
| 1144 |
+
# Backup & Restore section
|
| 1145 |
+
backup_fields: list[SettingsField] = []
|
| 1146 |
+
|
| 1147 |
+
backup_fields.append(
|
| 1148 |
+
{
|
| 1149 |
+
"id": "backup_create",
|
| 1150 |
+
"title": "Create Backup",
|
| 1151 |
+
"description": "Create a backup archive of selected files and configurations "
|
| 1152 |
+
"using customizable patterns.",
|
| 1153 |
+
"type": "button",
|
| 1154 |
+
"value": "Create Backup",
|
| 1155 |
+
}
|
| 1156 |
+
)
|
| 1157 |
+
|
| 1158 |
+
backup_fields.append(
|
| 1159 |
+
{
|
| 1160 |
+
"id": "backup_restore",
|
| 1161 |
+
"title": "Restore from Backup",
|
| 1162 |
+
"description": "Restore files and configurations from a backup archive "
|
| 1163 |
+
"with pattern-based selection.",
|
| 1164 |
+
"type": "button",
|
| 1165 |
+
"value": "Restore Backup",
|
| 1166 |
+
}
|
| 1167 |
+
)
|
| 1168 |
+
|
| 1169 |
+
backup_section: SettingsSection = {
|
| 1170 |
+
"id": "backup_restore",
|
| 1171 |
+
"title": "Backup & Restore",
|
| 1172 |
+
"description": "Backup and restore Agent Zero data and configurations "
|
| 1173 |
+
"using glob pattern-based file selection.",
|
| 1174 |
+
"fields": backup_fields,
|
| 1175 |
+
"tab": "backup",
|
| 1176 |
+
}
|
| 1177 |
+
|
| 1178 |
+
# Add the section to the result
|
| 1179 |
+
result: SettingsOutput = {
|
| 1180 |
+
"sections": [
|
| 1181 |
+
agent_section,
|
| 1182 |
+
chat_model_section,
|
| 1183 |
+
util_model_section,
|
| 1184 |
+
browser_model_section,
|
| 1185 |
+
embed_model_section,
|
| 1186 |
+
memory_section,
|
| 1187 |
+
speech_section,
|
| 1188 |
+
api_keys_section,
|
| 1189 |
+
auth_section,
|
| 1190 |
+
mcp_client_section,
|
| 1191 |
+
mcp_server_section,
|
| 1192 |
+
a2a_section,
|
| 1193 |
+
external_api_section,
|
| 1194 |
+
backup_section,
|
| 1195 |
+
dev_section,
|
| 1196 |
+
# code_exec_section,
|
| 1197 |
+
]
|
| 1198 |
+
}
|
| 1199 |
+
return result
|
| 1200 |
+
|
| 1201 |
+
|
| 1202 |
+
def _get_api_key_field(settings: Settings, provider: str, title: str) -> SettingsField:
    """Build the settings-UI field describing one provider's API key.

    The stored key itself is never sent to the UI; a placeholder is shown
    whenever a usable key exists, otherwise the field starts empty.
    """
    stored = settings["api_keys"].get(provider, models.get_api_key(provider))
    has_key = bool(stored) and stored != "None"
    field: SettingsField = {
        "id": f"api_key_{provider}",
        "title": title,
        "type": "text",
        "value": API_KEY_PLACEHOLDER if has_key else "",
    }
    return field
|
| 1211 |
+
|
| 1212 |
+
|
| 1213 |
+
def convert_in(settings: dict) -> Settings:
    """Merge settings submitted by the UI (sections/fields form) back into
    the currently stored settings and return the combined result.

    Placeholder values (masked passwords / API keys) are skipped so that
    secrets the user did not touch are not overwritten.
    """
    merged = get_settings()
    placeholders = (PASSWORD_PLACEHOLDER, API_KEY_PLACEHOLDER)
    for section in settings["sections"]:
        for field in section.get("fields", []):
            value = field["value"]
            if value in placeholders:
                continue  # masked secret left unchanged by the user
            field_id = field["id"]
            if field_id.endswith("_kwargs"):
                # kwargs fields are edited as .env-style text in the UI
                merged[field_id] = _env_to_dict(value)
            elif field_id.startswith("api_key_"):
                merged["api_keys"][field_id] = value
            else:
                merged[field_id] = value
    return merged
|
| 1232 |
+
|
| 1233 |
+
|
| 1234 |
+
def get_settings() -> Settings:
    """Return the current settings, loading them lazily.

    On first access the settings file is read; if it is missing or empty,
    factory defaults are used. The result is always passed through
    normalize_settings() so callers see a complete, current-version dict.
    """
    global _settings
    if not _settings:
        _settings = _read_settings_file() or get_default_settings()
    return normalize_settings(_settings)
|
| 1242 |
+
|
| 1243 |
+
|
| 1244 |
+
def set_settings(settings: Settings, apply: bool = True):
    """Persist new settings and optionally apply them to the running system.

    The previously cached settings are handed to _apply_settings() so it can
    react only to values that actually changed.
    """
    global _settings
    old = _settings
    new = normalize_settings(settings)
    _settings = new
    _write_settings_file(new)
    if apply:
        _apply_settings(old)
|
| 1251 |
+
|
| 1252 |
+
|
| 1253 |
+
def set_settings_delta(delta: dict, apply: bool = True):
    """Update only the keys given in *delta*, keeping all other settings."""
    updated = get_settings().copy()
    updated.update(delta)
    set_settings(updated, apply)  # type: ignore
|
| 1257 |
+
|
| 1258 |
+
|
| 1259 |
+
def normalize_settings(settings: Settings) -> Settings:
    """Return a copy of *settings* coerced to the current schema.

    Steps: migrate values from older versions, drop unknown keys, fill in
    missing keys from the defaults, coerce each value to the default's type,
    and refresh the derived MCP server token. The input dict is not modified.
    """
    copy = settings.copy()
    default = get_default_settings()

    # adjust settings values to match current version if needed
    if "version" not in copy or copy["version"] != default["version"]:
        _adjust_to_version(copy, default)
        copy["version"] = default["version"]  # sync version

    # remove keys that are not in default
    keys_to_remove = [key for key in copy if key not in default]
    for key in keys_to_remove:
        del copy[key]

    # add missing keys and normalize types
    for key, value in default.items():
        if key not in copy:
            copy[key] = value
        else:
            try:
                # coerce the stored value to the type of the default value;
                # falls back to the default on any conversion failure
                copy[key] = type(value)(copy[key])  # type: ignore
                if isinstance(copy[key], str):
                    copy[key] = copy[key].strip()  # strip strings
            except (ValueError, TypeError):
                copy[key] = value  # make default instead

    # mcp server token is set automatically
    # (derived from runtime id + auth credentials, never taken from input)
    copy["mcp_server_token"] = create_auth_token()

    return copy
|
| 1289 |
+
|
| 1290 |
+
|
| 1291 |
+
def _adjust_to_version(settings: Settings, default: Settings):
    """Migrate settings created by an older version in place.

    From v0.9 on, agent no. 0 uses the "agent0" prompt profile instead of
    "default"; upgrade old configs that still reference the old default.
    """
    from_v08 = "version" not in settings or settings["version"].startswith("v0.8")
    if from_v08 and settings.get("agent_profile", "default") == "default":
        settings["agent_profile"] = "agent0"
|
| 1297 |
+
|
| 1298 |
+
|
| 1299 |
+
def _read_settings_file() -> Settings | None:
    """Load settings from disk, or return None when no settings file exists."""
    if not os.path.exists(SETTINGS_FILE):
        return None
    raw = files.read_file(SETTINGS_FILE)
    return normalize_settings(json.loads(raw))
|
| 1304 |
+
|
| 1305 |
+
|
| 1306 |
+
def _write_settings_file(settings: Settings):
    """Persist settings to disk.

    Secrets are first pushed into the .env store and then blanked out of a
    copy, so the JSON settings file never contains sensitive values.
    """
    sanitized = settings.copy()
    _write_sensitive_settings(sanitized)
    _remove_sensitive_settings(sanitized)
    files.write_file(SETTINGS_FILE, json.dumps(sanitized, indent=4))
|
| 1314 |
+
|
| 1315 |
+
|
| 1316 |
+
def _remove_sensitive_settings(settings: Settings):
    """Blank out all secret values in *settings* (in place)."""
    for secret in (
        "auth_login",
        "auth_password",
        "rfc_password",
        "root_password",
        "mcp_server_token",
    ):
        settings[secret] = ""
    settings["api_keys"] = {}
|
| 1323 |
+
|
| 1324 |
+
|
| 1325 |
+
def _write_sensitive_settings(settings: Settings):
    """Push all secret values from *settings* into the .env store.

    API keys are always written; passwords are written only when non-empty so
    that an untouched (blank/masked) password does not clear the stored one.

    Raises:
        Exception: from set_root_password() when not running dockerized.
    """
    for key, val in settings["api_keys"].items():
        dotenv.save_dotenv_value(key.upper(), val)

    dotenv.save_dotenv_value(dotenv.KEY_AUTH_LOGIN, settings["auth_login"])
    if settings["auth_password"]:
        dotenv.save_dotenv_value(dotenv.KEY_AUTH_PASSWORD, settings["auth_password"])
    if settings["rfc_password"]:
        dotenv.save_dotenv_value(dotenv.KEY_RFC_PASSWORD, settings["rfc_password"])

    # fix: previously the same condition was tested twice in two separate
    # if-blocks; store the root password and apply it to the system in one step
    if settings["root_password"]:
        dotenv.save_dotenv_value(dotenv.KEY_ROOT_PASSWORD, settings["root_password"])
        set_root_password(settings["root_password"])
|
| 1339 |
+
|
| 1340 |
+
|
| 1341 |
+
def get_default_settings() -> Settings:
    """Return a fresh Settings object populated with factory defaults.

    The chat/utility model defaults point at an OpenAI-compatible endpoint
    whose base URL can be overridden via the API_BASE_URL env variable.
    """
    api_base_url = os.getenv("API_BASE_URL", "https://api.helmholtz-blablador.fz-juelich.de/v1")
    return Settings(
        version=_get_version(),
        # --- chat model ---
        chat_model_provider="Other OpenAI compatible",
        chat_model_name="alias-large",
        chat_model_api_base=api_base_url,
        chat_model_kwargs={"temperature": "0"},
        chat_model_ctx_length=100000,
        chat_model_ctx_history=0.7,  # fraction of context reserved for history
        chat_model_vision=True,
        chat_model_rl_requests=0,  # rate limits; 0 = unlimited
        chat_model_rl_input=0,
        chat_model_rl_output=0,
        # --- utility model ---
        util_model_provider="Other OpenAI compatible",
        util_model_name="alias-large",
        util_model_api_base=api_base_url,
        util_model_ctx_length=100000,
        util_model_ctx_input=0.7,
        util_model_kwargs={"temperature": "0"},
        util_model_rl_requests=0,
        util_model_rl_input=0,
        util_model_rl_output=0,
        # --- embedding model ---
        embed_model_provider="huggingface",
        embed_model_name="sentence-transformers/all-MiniLM-L6-v2",
        embed_model_api_base="",
        embed_model_kwargs={},
        embed_model_rl_requests=0,
        embed_model_rl_input=0,
        # --- browser model ---
        browser_model_provider="Google",
        browser_model_name="gemini-2.0-flash-lite",
        browser_model_api_base="",
        browser_model_vision=True,
        browser_model_rl_requests=0,
        browser_model_rl_input=0,
        browser_model_rl_output=0,
        browser_model_kwargs={"temperature": "0"},
        # --- memory recall / memorize ---
        memory_recall_enabled=True,
        memory_recall_delayed=False,
        memory_recall_interval=3,
        memory_recall_history_len=10000,
        memory_recall_memories_max_search=12,
        memory_recall_solutions_max_search=8,
        memory_recall_memories_max_result=5,
        memory_recall_solutions_max_result=3,
        memory_recall_similarity_threshold=0.7,
        memory_recall_query_prep=True,
        memory_recall_post_filter=True,
        memory_memorize_enabled=True,
        memory_memorize_consolidation=True,
        memory_memorize_replace_threshold=0.9,
        # --- auth / agent profile ---
        api_keys={},
        auth_login="",
        auth_password="",
        root_password="",
        agent_profile="agent0",
        agent_memory_subdir="default",
        agent_knowledge_subdir="custom",
        # --- development / RFC ---
        rfc_auto_docker=True,
        rfc_url="localhost",
        rfc_password="",
        rfc_port_http=55080,
        rfc_port_ssh=55022,
        # local TTY inside docker, SSH into docker when developing natively
        shell_interface="local" if runtime.is_dockerized() else "ssh",
        # --- speech ---
        stt_model_size="base",
        stt_language="en",
        stt_silence_threshold=0.3,
        stt_silence_duration=1000,
        stt_waiting_timeout=2000,
        tts_kokoro=True,
        # --- MCP / A2A ---
        mcp_servers='{\n    "mcpServers": {}\n}',
        mcp_client_init_timeout=10,
        mcp_client_tool_timeout=120,
        mcp_server_enabled=False,
        mcp_server_token=create_auth_token(),
        a2a_server_enabled=False,
    )
|
| 1418 |
+
|
| 1419 |
+
|
| 1420 |
+
|
| 1421 |
+
def _apply_settings(previous: Settings | None):
    """Apply the newly stored settings to the running system.

    Reinitializes agent configs, and — only when the relevant values changed
    relative to *previous* — reloads the whisper model, the memory index, the
    MCP client config, and the MCP/A2A server tokens. When *previous* is
    None, everything is (re)applied unconditionally.
    """
    global _settings
    if _settings:
        # deferred imports avoid circular dependencies at module load time
        from agent import AgentContext
        from initialize import initialize_agent

        config = initialize_agent()
        for ctx in AgentContext._contexts.values():
            ctx.config = config  # reinitialize context config with new settings
            # apply config to agents (agent0 and its chain of subordinates)
            agent = ctx.agent0
            while agent:
                agent.config = ctx.config
                agent = agent.get_data(agent.DATA_NAME_SUBORDINATE)

        # reload whisper model if necessary
        if not previous or _settings["stt_model_size"] != previous["stt_model_size"]:
            task = defer.DeferredTask().start_task(
                whisper.preload, _settings["stt_model_size"])
            # TODO overkill, replace with background task

        # force memory reload on embedding model change
        if not previous or (
            _settings["embed_model_name"] != previous["embed_model_name"]
            or _settings["embed_model_provider"] != previous["embed_model_provider"]
            or _settings["embed_model_kwargs"] != previous["embed_model_kwargs"]
        ):
            from python.helpers.memory import reload as memory_reload

            memory_reload()

        # update mcp settings if necessary
        if not previous or _settings["mcp_servers"] != previous["mcp_servers"]:
            from python.helpers.mcp_handler import MCPConfig

            async def update_mcp_settings(mcp_servers: str):
                # NOTE: runs asynchronously via DeferredTask below
                PrintStyle(
                    background_color="black", font_color="white", padding=True
                ).print("Updating MCP config...")
                AgentContext.log_to_all(
                    type="info", content="Updating MCP settings...", temp=True
                )

                mcp_config = MCPConfig.get_instance()
                try:
                    MCPConfig.update(mcp_servers)
                except Exception as e:
                    # config parse/connect failure is reported but not fatal;
                    # the previously parsed config is printed below regardless
                    AgentContext.log_to_all(
                        type="error",
                        content=f"Failed to update MCP settings: {e}",
                        temp=False,
                    )
                    (
                        PrintStyle(
                            background_color="red", font_color="black", padding=True
                        ).print("Failed to update MCP settings")
                    )
                    (
                        PrintStyle(
                            background_color="black", font_color="red", padding=True
                        ).print(f"{e}")
                    )

                PrintStyle(
                    background_color="#6734C3", font_color="white", padding=True
                ).print("Parsed MCP config:")
                (
                    PrintStyle(
                        background_color="#334455", font_color="white", padding=False
                    ).print(mcp_config.model_dump_json())
                )
                AgentContext.log_to_all(
                    type="info", content="Finished updating MCP settings.", temp=True
                )

            task2 = defer.DeferredTask().start_task(
                update_mcp_settings, config.mcp_servers
            )  # TODO overkill, replace with background task

        # update token in mcp server
        current_token = (
            create_auth_token()
        )  # TODO - ugly, token in settings is generated from dotenv and does not always correspond
        if not previous or current_token != previous["mcp_server_token"]:

            async def update_mcp_token(token: str):
                from python.helpers.mcp_server import DynamicMcpProxy

                DynamicMcpProxy.get_instance().reconfigure(token=token)

            task3 = defer.DeferredTask().start_task(
                update_mcp_token, current_token
            )  # TODO overkill, replace with background task

        # update token in a2a server
        if not previous or current_token != previous["mcp_server_token"]:

            async def update_a2a_token(token: str):
                from python.helpers.fasta2a_server import DynamicA2AProxy

                DynamicA2AProxy.get_instance().reconfigure(token=token)

            task4 = defer.DeferredTask().start_task(
                update_a2a_token, current_token
            )  # TODO overkill, replace with background task
|
| 1526 |
+
|
| 1527 |
+
|
| 1528 |
+
def _env_to_dict(data: str):
|
| 1529 |
+
env_dict = {}
|
| 1530 |
+
line_pattern = re.compile(r"\s*([^#][^=]*)\s*=\s*(.*)")
|
| 1531 |
+
for line in data.splitlines():
|
| 1532 |
+
match = line_pattern.match(line)
|
| 1533 |
+
if match:
|
| 1534 |
+
key, value = match.groups()
|
| 1535 |
+
# Remove optional surrounding quotes (single or double)
|
| 1536 |
+
value = value.strip().strip('"').strip("'")
|
| 1537 |
+
env_dict[key.strip()] = value
|
| 1538 |
+
return env_dict
|
| 1539 |
+
|
| 1540 |
+
|
| 1541 |
+
def _dict_to_env(data_dict):
|
| 1542 |
+
lines = []
|
| 1543 |
+
for key, value in data_dict.items():
|
| 1544 |
+
if "\n" in value:
|
| 1545 |
+
value = f"'{value}'"
|
| 1546 |
+
elif " " in value or value == "" or any(c in value for c in "'\""):
|
| 1547 |
+
value = f'"{value}"'
|
| 1548 |
+
lines.append(f"{key}={value}")
|
| 1549 |
+
return "\n".join(lines)
|
| 1550 |
+
|
| 1551 |
+
|
| 1552 |
+
def set_root_password(password: str):
    """Set the OS root password inside the container and persist it to .env.

    Raises:
        Exception: when not running in a dockerized environment.
        subprocess.CalledProcessError: when chpasswd exits non-zero.
    """
    if not runtime.is_dockerized():
        raise Exception("root password can only be set in dockerized environments")
    # feed "root:<password>" to chpasswd on stdin; check=True raises on failure
    subprocess.run(
        ["chpasswd"],
        input=f"root:{password}".encode(),
        capture_output=True,
        check=True,
    )
    dotenv.save_dotenv_value(dotenv.KEY_ROOT_PASSWORD, password)
|
| 1562 |
+
|
| 1563 |
+
def get_runtime_config(set: Settings):
    """Derive code-execution SSH connection parameters from settings.

    In a dockerized environment the SSH daemon runs locally on port 22; in
    development the RFC destination host and configured SSH port are used.

    Args:
        set: current settings (parameter name kept for caller compatibility).

    Returns:
        dict with code_exec_ssh_enabled/addr/port/user keys.
    """
    ssh_enabled = set["shell_interface"] == "ssh"
    if runtime.is_dockerized():
        return {
            "code_exec_ssh_enabled": ssh_enabled,
            "code_exec_ssh_addr": "localhost",
            "code_exec_ssh_port": 22,
            "code_exec_ssh_user": "root",
        }

    # extract a bare hostname from the RFC URL: strip scheme, port, trailing /
    host = set["rfc_url"]
    if "//" in host:
        host = host.split("//")[1]
    # fix: use maxsplit so a value with multiple colons no longer raises
    # ValueError from tuple unpacking; any port in the URL is ignored
    if ":" in host:
        host = host.split(":", 1)[0]
    if host.endswith("/"):
        host = host[:-1]
    return {
        "code_exec_ssh_enabled": ssh_enabled,
        "code_exec_ssh_addr": host,
        "code_exec_ssh_port": set["rfc_port_ssh"],
        "code_exec_ssh_user": "root",
    }
|
| 1585 |
+
|
| 1586 |
+
def create_auth_token() -> str:
    """Derive a short, deterministic auth token for the MCP/A2A servers.

    The token is a hash of the persistent runtime id plus the configured
    login credentials, so it changes whenever credentials change but stays
    stable across restarts.
    """
    seed = ":".join(
        [
            runtime.get_persistent_id(),
            dotenv.get_dotenv_value(dotenv.KEY_AUTH_LOGIN) or "",
            dotenv.get_dotenv_value(dotenv.KEY_AUTH_PASSWORD) or "",
        ]
    )
    digest = hashlib.sha256(seed.encode()).digest()
    # url-safe base64 gives a compact token; padding '=' chars are dropped
    token = base64.urlsafe_b64encode(digest).decode().replace("=", "")
    return token[:16]
|
| 1595 |
+
|
| 1596 |
+
|
| 1597 |
+
def _get_version():
    """Return the short git tag of the running code, or "unknown"."""
    try:
        tag = str(git.get_git_info().get("short_tag", "")).strip()
    except Exception:
        return "unknown"
    return tag or "unknown"
|
python/tools/search_engine.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
from typing import List, Dict, Any
|
| 3 |
+
from python.helpers.tool import Tool, Response
|
| 4 |
+
|
| 5 |
+
# The base URL of the public SearXNG instance.
|
| 6 |
+
# WARNING: Using a public, third-party instance is not recommended for production
|
| 7 |
+
# due to significant reliability, security, and privacy risks.
|
| 8 |
+
SEARXNG_BASE_URL = "https://CJJ-on-HF-SearXNG.hf.space"
|
| 9 |
+
|
| 10 |
+
class SearchEngine(Tool):
    """Web search tool backed by a public SearXNG instance."""

    async def execute(self, query: str, category: str = "general", num_results: int = 5) -> Response:
        """Run a search against SearXNG and return formatted results.

        Args:
            query: The search query string.
            category: SearXNG category to target (e.g. 'science', 'it',
                'news'). 'general' performs a broad, unprefixed search.
            num_results: Maximum number of results to include.

        Returns:
            Response: formatted results, or an error description on failure.
        """
        if not query:
            return Response(message="Error: The search query cannot be empty.", break_loop=False)

        # Non-'general' categories use SearXNG's bang prefix syntax, which
        # routes the query to the engines configured for that category.
        if category and category != "general":
            effective_query = f"!{category} {query}"
        else:
            effective_query = query

        request_params = {
            "q": effective_query,
            "format": "json",  # machine-readable output is required
            "pageno": 1,
        }

        try:
            http_response = requests.get(
                f"{SEARXNG_BASE_URL}/search",
                params=request_params,
                timeout=15,  # public instances can be slow; allow a generous wait
            )
            # Turn 4xx/5xx responses into RequestException subclasses.
            http_response.raise_for_status()

            payload = http_response.json()
            hits: List[Dict[str, Any]] = payload.get("results", [])

            if not hits:
                return Response(message=f"No search results found for the query: '{query}'", break_loop=False)

            # Build a clean, readable block per result for the agent.
            sections = []
            for position, hit in enumerate(hits[:num_results]):
                title = hit.get("title", "No Title Provided")
                url = hit.get("url", "No URL Provided")
                snippet = hit.get("content") or hit.get("description", "No Snippet Provided")

                # Collapse all whitespace runs so the LLM input stays compact.
                if snippet:
                    snippet = " ".join(snippet.split())
                else:
                    snippet = "No Snippet Provided"

                sections.append(
                    f"Result {position+1}:\n"
                    f" Title: {title}\n"
                    f" URL: {url}\n"
                    f" Snippet: {snippet}"
                )

            return Response(message="\n---\n".join(sections), break_loop=False)

        except requests.exceptions.Timeout:
            return Response(message="Error: The search request timed out. The SearXNG instance may be offline or overloaded.", break_loop=False)
        except requests.exceptions.RequestException as e:
            return Response(message=f"Error: A network error occurred while contacting the search service: {e}", break_loop=False)
        except ValueError:  # response body was not valid JSON
            return Response(message="Error: Failed to parse a valid JSON response from the search service. The service might be down or returning malformed data.", break_loop=False)
|
requirements.txt
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
a2wsgi==1.10.8
|
| 2 |
+
ansio==0.0.2
|
| 3 |
+
browser-use==0.2.5
|
| 4 |
+
docker==7.1.0
|
| 5 |
+
duckduckgo-search==6.1.12
|
| 6 |
+
faiss-cpu==1.11.0
|
| 7 |
+
fastmcp==2.3.4
|
| 8 |
+
fasta2a==0.5.0
|
| 9 |
+
flask[async]==3.0.3
|
| 10 |
+
flask-basicauth==0.2.0
|
| 11 |
+
flaredantic==0.1.4
|
| 12 |
+
GitPython==3.1.43
|
| 13 |
+
inputimeout==1.0.4
|
| 14 |
+
kokoro==0.9.4
|
| 15 |
+
simpleeval==1.0.3
|
| 16 |
+
langchain-core==0.3.49
|
| 17 |
+
langchain-community==0.3.19
|
| 18 |
+
langchain-unstructured[all-docs]==0.1.6
|
| 19 |
+
openai-whisper==20240930
|
| 20 |
+
lxml_html_clean==0.3.1
|
| 21 |
+
markdown==3.7
|
| 22 |
+
mcp==1.9.0
|
| 23 |
+
newspaper3k==0.2.8
|
| 24 |
+
paramiko==3.5.0
|
| 25 |
+
playwright==1.52.0
|
| 26 |
+
pypdf==4.3.1
|
| 27 |
+
python-dotenv==1.1.0
|
| 28 |
+
pytz==2024.2
|
| 29 |
+
sentence-transformers==3.0.1
|
| 30 |
+
tiktoken==0.12.0
|
| 31 |
+
unstructured[all-docs]==0.16.23
|
| 32 |
+
unstructured-client==0.31.0
|
| 33 |
+
webcolors==24.6.0
|
| 34 |
+
nest-asyncio==1.6.0
|
| 35 |
+
crontab==1.0.1
|
| 36 |
+
litellm==1.74.0
|
| 37 |
+
markdownify==1.1.0
|
| 38 |
+
pymupdf==1.25.3
|
| 39 |
+
pytesseract==0.3.13
|
| 40 |
+
pdf2image==1.17.0
|
| 41 |
+
pathspec==0.12.1
|
| 42 |
+
psutil==7.1.3
|
| 43 |
+
soundfile==0.13.1
|
| 44 |
+
gradio_client
|
| 45 |
+
fastapi
|
| 46 |
+
uvicorn
|
run_ui.py
ADDED
|
@@ -0,0 +1,306 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import secrets, hmac, hashlib, time, base64
|
| 2 |
+
from datetime import timedelta
|
| 3 |
+
import os
|
| 4 |
+
import socket
|
| 5 |
+
import struct
|
| 6 |
+
from functools import wraps
|
| 7 |
+
import threading
|
| 8 |
+
from flask import Flask, request, Response, session
|
| 9 |
+
from flask_basicauth import BasicAuth
|
| 10 |
+
import initialize
|
| 11 |
+
from python.helpers import files, git, mcp_server, fasta2a_server
|
| 12 |
+
from python.helpers.files import get_abs_path
|
| 13 |
+
from python.helpers import runtime, dotenv, process
|
| 14 |
+
from python.helpers.extract_tools import load_classes_from_folder
|
| 15 |
+
from python.helpers.api import ApiHandler
|
| 16 |
+
from python.helpers.print_style import PrintStyle
|
| 17 |
+
import atexit
|
| 18 |
+
import asyncio
|
| 19 |
+
|
| 20 |
+
# Per-process CSRF signing key. Regenerated on every start, so issued tokens
# do not survive a restart of this process.
CSRF_SECRET = secrets.token_bytes(32)  # or os.environ["CSRF_SECRET"].encode()
TOKEN_TTL = 3600  # 1 hour validity
|
| 22 |
+
|
| 23 |
+
def generate_csrf_token():
    """Issue a stateless CSRF token of the form '<nonce>:<timestamp>.<hmac>'.

    The HMAC-SHA256 signature over "nonce:timestamp" (keyed by the
    process-level CSRF_SECRET) lets verify_csrf_token() validate tokens
    without any server-side storage.
    """
    random_part = secrets.token_hex(16)  # 128-bit random nonce
    issued_at = str(int(time.time()))
    payload = f"{random_part}:{issued_at}"
    signature = hmac.new(CSRF_SECRET, payload.encode(), hashlib.sha256).hexdigest()
    return f"{payload}.{signature}"
|
| 29 |
+
|
| 30 |
+
def verify_csrf_token(token):
    """Return True iff *token* carries a valid HMAC and is within TOKEN_TTL.

    Any malformed input (wrong shape, non-integer timestamp, extra
    separators) is treated as an invalid token rather than an error.
    """
    try:
        payload, signature = token.rsplit(".", 1)
        expected = hmac.new(CSRF_SECRET, payload.encode(), hashlib.sha256).hexdigest()
        # constant-time comparison to avoid leaking signature prefixes
        if not hmac.compare_digest(signature, expected):
            return False
        _nonce, issued_at = payload.split(":")
        # reject tokens older than the TTL
        return time.time() - int(issued_at) <= TOKEN_TTL
    except Exception:
        return False
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# Set the new timezone to 'UTC'
os.environ["TZ"] = "UTC"
# Disable parallelism in the tokenizers library (silences fork warnings)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Apply the timezone change (time.tzset is POSIX-only, absent on Windows)
if hasattr(time, 'tzset'):
    time.tzset()

# initialize the internal Flask server
webapp = Flask("app", static_folder=get_abs_path("./webui"), static_url_path="/")
# Stable secret from the environment if provided; otherwise per-process random
# (sessions then do not survive a restart)
webapp.secret_key = os.getenv("FLASK_SECRET_KEY") or secrets.token_hex(32)
webapp.config.update(
    JSON_SORT_KEYS=False,
    SESSION_COOKIE_NAME="session_" + runtime.get_runtime_id(),  # bind the session cookie name to runtime id to prevent session collision on same host
    SESSION_COOKIE_SAMESITE="Strict",
    SESSION_PERMANENT=True,
    PERMANENT_SESSION_LIFETIME=timedelta(days=1)
)


# Shared lock handed to every API handler instance (serializes handler state)
lock = threading.Lock()

# Set up basic authentication for UI and API but not MCP
basic_auth = BasicAuth(webapp)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def is_loopback_address(address):
    """Return True when *address* refers exclusively to the loopback interface.

    Accepts a literal IPv4 address, a literal IPv6 address, or a hostname.
    A hostname is resolved for both address families, and every resolved
    address must be loopback; any resolution failure counts as non-loopback.
    """
    checkers = {
        # IPv4 loopback is the whole 127.0.0.0/8 block: top octet == 127.
        socket.AF_INET: lambda ip: struct.unpack("!I", socket.inet_aton(ip))[0] >> (32 - 8) == 127,
        # IPv6 loopback is the single address ::1.
        socket.AF_INET6: lambda ip: ip == "::1",
    }

    # Classify the input: IPv6 literal, IPv4 literal, or hostname.
    try:
        socket.inet_pton(socket.AF_INET6, address)
        kind = "ipv6"
    except socket.error:
        try:
            socket.inet_pton(socket.AF_INET, address)
            kind = "ipv4"
        except socket.error:
            kind = "hostname"

    if kind == "ipv4":
        return checkers[socket.AF_INET](address)
    if kind == "ipv6":
        return checkers[socket.AF_INET6](address)

    # Hostname: resolve under both families and require loopback everywhere.
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            records = socket.getaddrinfo(address, None, family, socket.SOCK_STREAM)
        except socket.gaierror:
            return False
        for fam, _, _, _, sockaddr in records:
            if not checkers[fam](sockaddr[0]):
                return False
    return True
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def requires_api_key(f):
    """Decorator: reject the request unless it carries the MCP auth token.

    The key may arrive in the X-API-KEY header or as an 'api_key' field in
    the JSON body; a missing key yields 401 "API key required", a wrong key
    yields 401 "Invalid API key".
    """
    @wraps(f)
    async def decorated(*args, **kwargs):
        # Use the auth token from settings (same as MCP server)
        from python.helpers.settings import get_settings
        expected = get_settings()["mcp_server_token"]

        if supplied := request.headers.get("X-API-KEY"):
            if supplied != expected:
                return Response("Invalid API key", 401)
        elif request.json and request.json.get("api_key"):
            if request.json.get("api_key") != expected:
                return Response("Invalid API key", 401)
        else:
            return Response("API key required", 401)
        return await f(*args, **kwargs)

    return decorated
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
# allow only loopback addresses
|
| 126 |
+
def requires_loopback(f):
    """Decorator: allow only requests that originate from a loopback address."""
    @wraps(f)
    async def decorated(*args, **kwargs):
        if is_loopback_address(request.remote_addr):
            return await f(*args, **kwargs)
        return Response(
            "Access denied.",
            403,
            {},
        )

    return decorated
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
# require authentication for handlers
|
| 141 |
+
def requires_auth(f):
    """Decorator: enforce HTTP Basic auth when AUTH_LOGIN/AUTH_PASSWORD are set.

    If either credential is unset in the dotenv config, authentication is
    skipped entirely and the handler runs unprotected.
    """
    @wraps(f)
    async def decorated(*args, **kwargs):
        expected_user = dotenv.get_dotenv_value("AUTH_LOGIN")
        expected_pass = dotenv.get_dotenv_value("AUTH_PASSWORD")
        if expected_user and expected_pass:
            creds = request.authorization
            valid = bool(creds) and creds.username == expected_user and creds.password == expected_pass
            if not valid:
                # 401 with WWW-Authenticate makes browsers show the login prompt
                return Response(
                    "Could not verify your access level for that URL.\n"
                    "You have to login with proper credentials",
                    401,
                    {"WWW-Authenticate": 'Basic realm="Login Required"'},
                )
        return await f(*args, **kwargs)

    return decorated
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def csrf_protect(f):
    """Decorator: require a valid X-CSRF-Token header on the request."""
    @wraps(f)
    async def decorated(*args, **kwargs):
        supplied = request.headers.get("X-CSRF-Token")
        if supplied and verify_csrf_token(supplied):
            print("CSRF token OK.")
            return await f(*args, **kwargs)
        print("Invalid or missing CSRF token!")
        return Response("CSRF token missing or invalid", 403)

    return decorated
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
# handle default address, load index
@webapp.route("/", methods=["GET"])
@requires_auth
async def serve_index():
    """Serve webui/index.html with version info and a fresh CSRF token injected.

    Reads the template from disk on every request, substitutes version
    placeholders from git metadata, and injects csrf-token / runtime-id
    meta tags just before </head> so the frontend (webui/js/api.js) can
    pick them up without an extra round-trip.
    """
    PrintStyle().print("Serving index.html")
    gitinfo = None
    try:
        gitinfo = git.get_git_info()
    except Exception as e:
        # Fall back to placeholder version info when git metadata is unavailable
        PrintStyle().error(f"Error getting git info: {e}")
        gitinfo = {
            "version": "unknown",
            "commit_time": "unknown",
        }
    index_content = files.read_file("webui/index.html")
    index_content = files.replace_placeholders_text(
        _content=index_content,
        version_no=gitinfo["version"],
        version_time=gitinfo["commit_time"]
    )

    # Generate and inject CSRF token and runtime_id into meta tags
    csrf_token = generate_csrf_token()
    runtime_id = runtime.get_runtime_id()
    meta_tags = f'''<meta name="csrf-token" content="{csrf_token}">
    <meta name="runtime-id" content="{runtime_id}">'''
    index_content = index_content.replace("</head>", f"{meta_tags}</head>")
    PrintStyle().print("Finished serving index.html")
    return index_content
|
| 202 |
+
|
| 203 |
+
def run():
    """Configure and start the web UI server; blocks until shutdown.

    Registers every ApiHandler found in python/api on the Flask app (each
    wrapped with the access-control decorators it requests), mounts the
    MCP and A2A ASGI apps under /mcp and /a2a, then serves forever on the
    configured host/port with a threaded werkzeug server.
    """
    PrintStyle().print("Initializing framework...")

    # Suppress only request logs but keep the startup messages
    from werkzeug.serving import WSGIRequestHandler
    from werkzeug.serving import make_server
    from werkzeug.middleware.dispatcher import DispatcherMiddleware
    from a2wsgi import ASGIMiddleware

    PrintStyle().print("Starting server...")

    class NoRequestLoggingWSGIRequestHandler(WSGIRequestHandler):
        # Request handler that stays silent for every request
        def log_request(self, code="-", size="-"):
            pass  # Override to suppress request logging

    # Get configuration from environment
    port = runtime.get_web_ui_port()
    host = (
        runtime.get_arg("host") or dotenv.get_dotenv_value("WEB_UI_HOST") or "localhost"
    )
    server = None

    def register_api_handler(app, handler: type[ApiHandler]):
        # URL path is derived from the handler's module file name
        name = handler.__module__.split(".")[-1]
        instance = handler(app, lock)

        async def handler_wrap():
            return await instance.handle_request(request=request)

        # Layer on only the access-control decorators this handler asks for
        if handler.requires_loopback():
            handler_wrap = requires_loopback(handler_wrap)
        if handler.requires_auth():
            handler_wrap = requires_auth(handler_wrap)
        if handler.requires_api_key():
            handler_wrap = requires_api_key(handler_wrap)
        if handler.requires_csrf():
            handler_wrap = csrf_protect(handler_wrap)

        app.add_url_rule(
            f"/{name}",
            f"/{name}",
            handler_wrap,
            methods=handler.get_methods(),
        )

    # initialize and register API handlers
    handlers = load_classes_from_folder("python/api", "*.py", ApiHandler)
    for handler in handlers:
        register_api_handler(webapp, handler)

    # add the webapp, mcp, and a2a to the app
    middleware_routes = {
        "/mcp": ASGIMiddleware(app=mcp_server.DynamicMcpProxy.get_instance()),  # type: ignore
        "/a2a": ASGIMiddleware(app=fasta2a_server.DynamicA2AProxy.get_instance()),  # type: ignore
    }

    app = DispatcherMiddleware(webapp, middleware_routes)  # type: ignore

    PrintStyle().debug(f"Starting server at http://{host}:{port} ...")

    server = make_server(
        host=host,
        port=port,
        app=app,
        request_handler=NoRequestLoggingWSGIRequestHandler,
        threaded=True,
    )
    # Hand the server to the process helper so it can be stopped externally
    process.set_server(server)
    server.log_startup()

    # Start init_a0 in a background thread when server starts
    # threading.Thread(target=init_a0, daemon=True).start()
    init_a0()

    # run the server
    server.serve_forever()
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def init_a0():
    """One-time application initialization: chats, MCP, job loop, preload.

    Order matters: chat restoration is awaited synchronously before the
    rest so existing chats are visible immediately after a restart.
    """
    # initialize contexts and MCP
    init_chats = initialize.initialize_chats()
    # only wait for init chats, otherwise they would seem to disappear for a while on restart
    init_chats.result_sync()

    initialize.initialize_mcp()
    # start job loop
    initialize.initialize_job_loop()
    # preload
    initialize.initialize_preload()
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def shutdown_mcp():
    """atexit hook: close the dynamic MCP proxy, if one exists, at exit."""
    instance = mcp_server.DynamicMcpProxy.get_instance()
    if instance:
        # proxy.close() is a coroutine; drive it to completion on a fresh loop
        asyncio.run(instance.close())
|
| 298 |
+
|
| 299 |
+
atexit.register(shutdown_mcp)
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
# run the internal server
|
| 303 |
+
if __name__ == "__main__":
|
| 304 |
+
runtime.initialize()
|
| 305 |
+
dotenv.load_dotenv()
|
| 306 |
+
run()
|
searxng/settings.yml
ADDED
|
File without changes
|
start.sh
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

echo "Starting Skilled-Agent..."

# Run secret check
python check_secrets.py

# Start FastAPI app
# We use port 5000 as configured in the Space metadata.
# `exec` replaces this shell with uvicorn so the server receives SIGTERM
# directly on container shutdown instead of being orphaned behind bash.
exec uvicorn api_app:app --host 0.0.0.0 --port 5000
|
webui/index.html
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
webui/js/api.js
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
 * Call a JSON-in JSON-out API endpoint.
 * The payload is serialized automatically and the CSRF token is attached
 * by fetchApi.
 * @param {string} endpoint - The API endpoint to call
 * @param {any} data - The data to send to the API
 * @returns {Promise<any>} The parsed JSON response
 * @throws {Error} with the response body text on a non-2xx status
 */
export async function callJsonApi(endpoint, data) {
  const response = await fetchApi(endpoint, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    credentials: "same-origin",
    body: JSON.stringify(data),
  });

  if (!response.ok) {
    throw new Error(await response.text());
  }
  return await response.json();
}
|
| 25 |
+
|
| 26 |
+
/**
 * Fetch wrapper for A0 APIs that ensures CSRF token exchange.
 * Adds the cached CSRF token as an X-CSRF-Token header; on a 403 the
 * token is assumed stale, cleared, and the request retried exactly once.
 * @param {string} url - The URL to fetch
 * @param {Object} [request] - The fetch request options
 * @returns {Promise<Response>} The fetch response
 */
export async function fetchApi(url, request) {
  const attempt = async (allowRetry) => {
    const token = await getCsrfToken();

    // mutate (or create) the options object so headers carry the token
    const options = request || {};
    options.headers = options.headers || {};
    options.headers["X-CSRF-Token"] = token;

    const response = await fetch(url, options);

    // 403 may indicate an expired CSRF token; refresh and retry once
    if (response.status === 403 && allowRetry) {
      csrfToken = null;
      return await attempt(false);
    }
    return response;
  };

  return await attempt(true);
}
|
| 67 |
+
|
| 68 |
+
// Module-level cache of the CSRF token (null until first resolved).
let csrfToken = null;

/**
 * Get the CSRF token for API requests, caching it after the first lookup.
 * Prefers the meta tag injected server-side into index.html; falls back
 * to asking the /csrf_token endpoint directly.
 * @returns {Promise<string>} The CSRF token
 */
async function getCsrfToken() {
  if (csrfToken) return csrfToken;

  const meta = document.querySelector('meta[name="csrf-token"]');
  if (meta) {
    csrfToken = meta.content;
    return csrfToken;
  }

  // fallback to fetch, but this should not happen
  const data = await fetch("/csrf_token", {
    credentials: "same-origin",
  }).then((r) => r.json());
  csrfToken = data.token;
  document.cookie = `csrf_token_${data.runtime_id}=${csrfToken}; SameSite=Strict; Path=/`;
  return csrfToken;
}
|
webui/js/index.js
ADDED
|
@@ -0,0 +1,1275 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import * as msgs from "/js/messages.js";
|
| 2 |
+
import * as api from "/js/api.js";
|
| 3 |
+
import * as css from "/js/css.js";
|
| 4 |
+
import { sleep } from "/js/sleep.js";
|
| 5 |
+
import { store as attachmentsStore } from "/components/chat/attachments/attachmentsStore.js";
|
| 6 |
+
import { store as speechStore } from "/components/chat/speech/speech-store.js";
|
| 7 |
+
import { store as notificationStore } from "/components/notifications/notification-store.js";
|
| 8 |
+
|
| 9 |
+
globalThis.fetchApi = api.fetchApi; // TODO - backward compatibility for non-modular scripts, remove once refactored to alpine
|
| 10 |
+
|
| 11 |
+
// --- Cached DOM references used throughout this module ---
const leftPanel = document.getElementById("left-panel");
const rightPanel = document.getElementById("right-panel");
const container = document.querySelector(".container");
const chatInput = document.getElementById("chat-input");
const chatHistory = document.getElementById("chat-history");
const sendButton = document.getElementById("send-button");
const inputSection = document.getElementById("input-section");
const statusSection = document.getElementById("status-section");
const chatsSection = document.getElementById("chats-section");
const tasksSection = document.getElementById("tasks-section");
const progressBar = document.getElementById("progress-bar");
const autoScrollSwitch = document.getElementById("auto-scroll-switch");
const timeDate = document.getElementById("time-date-container");

// --- Module-level UI state ---
let autoScroll = true; // follow chat output as it streams in
let context = ""; // id of the currently selected chat/task context
let resetCounter = 0; // bumped on chat reset; part of chat-based ids (see getChatBasedId)
let skipOneSpeech = false; // suppress TTS once after a context switch
let connectionStatus = undefined; // undefined = not checked yet, true = connected, false = disconnected

// Initialize the toggle button
setupSidebarToggle();
// Initialize tabs (setupTabs is defined elsewhere in this file)
setupTabs();
|
| 35 |
+
|
| 36 |
+
/** Whether the chat view should auto-follow new output. */
export function getAutoScroll() {
  return autoScroll;
}

/** True when the viewport is narrow enough for the mobile layout. */
function isMobile() {
  const MOBILE_BREAKPOINT = 768; // px
  return window.innerWidth <= MOBILE_BREAKPOINT;
}
|
| 43 |
+
|
| 44 |
+
/**
 * Show/hide the left sidebar. When `show` is a boolean it forces that
 * state; any other argument (e.g. a click event) simply toggles.
 */
function toggleSidebar(show) {
  const overlay = document.getElementById("sidebar-overlay");
  if (typeof show === "boolean") {
    // Forced state: sidebar visible <=> not hidden, right panel not expanded.
    leftPanel.classList.toggle("hidden", !show);
    rightPanel.classList.toggle("expanded", !show);
    overlay.classList.toggle("visible", show);
  } else {
    // Plain toggle; overlay mirrors the resulting sidebar visibility.
    leftPanel.classList.toggle("hidden");
    rightPanel.classList.toggle("expanded");
    const sidebarVisible = !leftPanel.classList.contains("hidden");
    overlay.classList.toggle("visible", sidebarVisible);
  }
}
|
| 59 |
+
|
| 60 |
+
/** Collapse the sidebar on mobile widths, show it on desktop widths. */
function handleResize() {
  const overlay = document.getElementById("sidebar-overlay");
  const mobile = isMobile();
  // On mobile: hide sidebar, expand right panel; on desktop: the reverse.
  leftPanel.classList.toggle("hidden", mobile);
  rightPanel.classList.toggle("expanded", mobile);
  // The dimming overlay is never shown as a result of a resize.
  overlay.classList.remove("visible");
}

globalThis.addEventListener("load", handleResize);
globalThis.addEventListener("resize", handleResize);

// Tapping the overlay on mobile dismisses the sidebar.
document.addEventListener("DOMContentLoaded", () => {
  const overlay = document.getElementById("sidebar-overlay");
  overlay.addEventListener("click", () => {
    if (isMobile()) {
      toggleSidebar(false);
    }
  });
});
|
| 84 |
+
|
| 85 |
+
/**
 * Wire the sidebar toggle button; retries every 100ms until the button
 * exists in the DOM (this module may execute before the markup is ready).
 *
 * NOTE: `toggleSidebar` is registered by reference, so the additional
 * DOMContentLoaded registration below cannot attach a duplicate listener
 * (addEventListener de-duplicates identical type/listener pairs).
 *
 * Fix vs. original: removed unused local `leftPanel`/`rightPanel`
 * declarations that shadowed the module-level constants (dead code).
 */
function setupSidebarToggle() {
  const toggleSidebarButton = document.getElementById("toggle-sidebar");
  if (toggleSidebarButton) {
    toggleSidebarButton.addEventListener("click", toggleSidebar);
  } else {
    console.error("Toggle sidebar button not found");
    setTimeout(setupSidebarToggle, 100); // retry until the element appears
  }
}
document.addEventListener("DOMContentLoaded", setupSidebarToggle);
|
| 97 |
+
|
| 98 |
+
/**
 * Send the current chat input (plus any staged attachments) to the
 * backend via /message_async, then adopt the context id the backend
 * returns. Disables the send button for the duration of the request.
 *
 * Fixes vs. original:
 *  - `sleep(0)` is now awaited — the original dropped the promise, so the
 *    "render user message before upload" yield never actually happened.
 *  - Removed the dead heading ternary: inside `if (hasAttachments)` the
 *    attachment count is always > 0, so the heading is always the
 *    upload notice.
 */
export async function sendMessage() {
  const sendButton = document.getElementById("send-button");
  try {
    sendButton.classList.add("loading");
    sendButton.disabled = true;

    const message = chatInput.value.trim();
    const attachmentsWithUrls = attachmentsStore.getAttachmentsForSending();
    const hasAttachments = attachmentsWithUrls.length > 0;

    if (message || hasAttachments) {
      let response;
      const messageId = generateGUID();

      // Clear input and attachments immediately for responsive UX.
      chatInput.value = "";
      attachmentsStore.clearAttachments();
      adjustTextareaHeight();

      if (hasAttachments) {
        // Render the user message with the upload notice as heading.
        setMessage(messageId, "user", "Uploading attachments...", message, false, {
          // attachments: attachmentsWithUrls, // skip here, let the backend properly log them
        });

        // Yield one frame so the message renders before the upload starts - better UX
        await sleep(0);

        // Attachments require multipart form data.
        const formData = new FormData();
        formData.append("text", message);
        formData.append("context", context);
        formData.append("message_id", messageId);
        for (const attachment of attachmentsWithUrls) {
          formData.append("attachments", attachment.file);
        }

        response = await api.fetchApi("/message_async", {
          method: "POST",
          body: formData,
        });
      } else {
        // For text-only messages
        const data = {
          text: message,
          context,
          message_id: messageId,
        };
        response = await api.fetchApi("/message_async", {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
          },
          body: JSON.stringify(data),
        });
      }

      // Handle response
      const jsonResponse = await response.json();
      if (!jsonResponse) {
        toast("No response returned.", "error");
      } else {
        // Backend may create/normalize the context id; adopt it.
        setContext(jsonResponse.context);
      }
    }
  } catch (e) {
    toastFetchError("Error sending message", e); // Will use new notification system
  } finally {
    sendButton.classList.remove("loading");
    sendButton.disabled = false;
  }
}
|
| 176 |
+
|
| 177 |
+
/**
 * Log a failed request and surface it to the user via the frontend
 * notification system, annotating the toast with the backend connection
 * state. `error` may be an Error, a string, or anything with toString().
 */
function toastFetchError(text, error) {
  console.error(text, error);
  // Use new frontend error notification system (async, but we don't need to wait)
  const errorMessage = error?.message || error?.toString() || "Unknown error";

  if (getConnectionStatus()) {
    // Backend is connected, just show the error
    toastFrontendError(`${text}: ${errorMessage}`).catch((e) =>
      console.error("Failed to show error toast:", e)
    );
  } else {
    // Backend is disconnected, show connection error
    toastFrontendError(
      `${text} (backend appears to be disconnected): ${errorMessage}`,
      "Connection Error"
    ).catch((e) => console.error("Failed to show connection error toast:", e));
  }
}
// Legacy global for non-module scripts.
globalThis.toastFetchError = toastFetchError;
|
| 196 |
+
|
| 197 |
+
// Enter sends the message; Shift+Enter inserts a newline as usual.
chatInput.addEventListener("keydown", (e) => {
  if (e.key === "Enter" && !e.shiftKey) {
    e.preventDefault();
    sendMessage();
  }
});

sendButton.addEventListener("click", sendMessage);
|
| 205 |
+
|
| 206 |
+
/**
 * Append `text` to the chat input (used e.g. by external components),
 * inserting a separating space when the input doesn't end with one,
 * then resize the textarea and fire an `input` event for listeners.
 */
export function updateChatInput(text) {
  console.log("updateChatInput called with:", text);

  const currentValue = chatInput.value;
  const separator =
    currentValue.length > 0 && !currentValue.endsWith(" ") ? " " : "";
  chatInput.value = `${currentValue}${separator}${text} `;

  adjustTextareaHeight();
  chatInput.dispatchEvent(new Event("input"));

  console.log("Updated chat input value:", chatInput.value);
}
|
| 220 |
+
|
| 221 |
+
/** Render the current local time (12-hour clock) and date into #time-date. */
function updateUserTime() {
  const now = new Date();
  const rawHours = now.getHours();
  const ampm = rawHours >= 12 ? "pm" : "am";
  const hours12 = rawHours % 12 || 12; // map 0 -> 12
  const pad = (n) => n.toString().padStart(2, "0");

  const timeString = `${hours12}:${pad(now.getMinutes())}:${pad(
    now.getSeconds()
  )} ${ampm}`;

  // Locale-formatted date, e.g. "Jan 1, 2025".
  const dateString = now.toLocaleDateString(undefined, {
    year: "numeric",
    month: "short",
    day: "numeric",
  });

  const userTimeElement = document.getElementById("time-date");
  userTimeElement.innerHTML = `${timeString}<br><span id="user-date">${dateString}</span>`;
}

updateUserTime();
setInterval(updateUserTime, 1000); // tick every second
|
| 245 |
+
|
| 246 |
+
/**
 * Render/update a chat message via the messages module, and keep the
 * history pinned to the bottom while auto-scroll is enabled.
 */
function setMessage(id, type, heading, content, temp, kvps = null) {
  const result = msgs.setMessage(id, type, heading, content, temp, kvps);
  if (autoScroll) {
    chatHistory.scrollTop = chatHistory.scrollHeight;
  }
  return result;
}
|
| 251 |
+
|
| 252 |
+
/**
 * Open a file picker and upload the chosen documents to the backend
 * knowledge base (/import_knowledge) for the current context.
 */
globalThis.loadKnowledge = async function () {
  const input = document.createElement("input");
  input.type = "file";
  input.accept = ".txt,.pdf,.csv,.html,.json,.md";
  input.multiple = true;

  input.onchange = async () => {
    try {
      const formData = new FormData();
      for (const file of input.files) {
        formData.append("files[]", file);
      }
      formData.append("ctxid", getContext());

      const response = await api.fetchApi("/import_knowledge", {
        method: "POST",
        body: formData,
      });

      if (!response.ok) {
        toast(await response.text(), "error");
        return;
      }

      const data = await response.json();
      toast(
        "Knowledge files imported: " + data.filenames.join(", "),
        "success"
      );
    } catch (e) {
      toastFetchError("Error loading knowledge", e);
    }
  };

  input.click();
};
|
| 288 |
+
|
| 289 |
+
/** Grow/shrink the chat textarea to exactly fit its content. */
function adjustTextareaHeight() {
  chatInput.style.height = "auto"; // reset so scrollHeight reflects the content
  chatInput.style.height = `${chatInput.scrollHeight}px`;
}
|
| 293 |
+
|
| 294 |
+
/**
 * POST `data` as JSON to `url` and return the parsed response body.
 * Thin wrapper around api.callJsonApi, kept for existing callers.
 */
export const sendJsonData = async function (url, data) {
  return await api.callJsonApi(url, data);
};
// Legacy global for non-module scripts.
globalThis.sendJsonData = sendJsonData;
|
| 312 |
+
|
| 313 |
+
function generateGUID() {
|
| 314 |
+
return "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g, function (c) {
|
| 315 |
+
var r = (Math.random() * 16) | 0;
|
| 316 |
+
var v = c === "x" ? r : (r & 0x3) | 0x8;
|
| 317 |
+
return v.toString(16);
|
| 318 |
+
});
|
| 319 |
+
}
|
| 320 |
+
|
| 321 |
+
/** Last known backend connectivity (undefined until the first poll). */
function getConnectionStatus() {
  return connectionStatus;
}

/**
 * Record backend connectivity and mirror it into the Alpine status-icon
 * component inside the time/date container, when present.
 */
function setConnectionStatus(connected) {
  connectionStatus = connected;
  if (!globalThis.Alpine || !timeDate) return;
  const statusIconEl = timeDate.querySelector(".status-icon");
  if (!statusIconEl) return;
  const statusIcon = Alpine.$data(statusIconEl);
  if (statusIcon) {
    statusIcon.connected = connected;
  }
}
|
| 337 |
+
|
| 338 |
+
// Incremental-poll tracking: last rendered log version, the guid of the
// log stream (changes when a chat is reset), and last line spoken by TTS.
let lastLogVersion = 0;
let lastLogGuid = "";
let lastSpokenNo = 0;

/**
 * Poll the backend (/poll) for new log lines, notifications, contexts and
 * tasks, and sync everything into the UI. Returns true when new log lines
 * were rendered, false otherwise (and on errors / invalid responses).
 */
async function poll() {
  let updated = false;
  try {
    // Get timezone from navigator
    const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone;

    const log_from = lastLogVersion;
    const response = await sendJsonData("/poll", {
      log_from: log_from,
      notifications_from: notificationStore.lastNotificationVersion || 0,
      context: context || null,
      timezone: timezone,
    });

    // Check if the response is valid
    if (!response) {
      console.error("Invalid response from poll endpoint");
      return false;
    }

    // Adopt the backend's context on the very first poll.
    if (!context) setContext(response.context);
    if (response.context != context) return; //skip late polls after context change

    // if the chat has been reset, restart this poll as it may have been called with incorrect log_from
    if (lastLogGuid != response.log_guid) {
      chatHistory.innerHTML = "";
      lastLogVersion = 0;
      lastLogGuid = response.log_guid;
      await poll();
      return;
    }

    // Render any log lines newer than the last seen version.
    if (lastLogVersion != response.log_version) {
      updated = true;
      for (const log of response.logs) {
        const messageId = log.id || log.no; // Use log.id if available
        setMessage(
          messageId,
          log.type,
          log.heading,
          log.content,
          log.temp,
          log.kvps
        );
      }
      afterMessagesUpdate(response.logs);
    }

    // NOTE(review): log_version/log_guid are assigned again at the end of
    // this try block; one of the two pairs appears redundant — confirm.
    lastLogVersion = response.log_version;
    lastLogGuid = response.log_guid;

    updateProgress(response.log_progress, response.log_progress_active);

    // Update notifications from response
    notificationStore.updateFromPoll(response);

    //set ui model vars from backend
    if (globalThis.Alpine && inputSection) {
      const inputAD = Alpine.$data(inputSection);
      if (inputAD) {
        inputAD.paused = response.paused;
      }
    }

    // Update status icon state
    setConnectionStatus(true);

    // Update chats list and sort by created_at time (newer first)
    let chatsAD = null;
    let contexts = response.contexts || [];
    if (globalThis.Alpine && chatsSection) {
      chatsAD = Alpine.$data(chatsSection);
      if (chatsAD) {
        chatsAD.contexts = contexts.sort(
          (a, b) => (b.created_at || 0) - (a.created_at || 0)
        );
      }
    }

    // Update tasks list and sort by creation time (newer first)
    const tasksSection = document.getElementById("tasks-section");
    if (globalThis.Alpine && tasksSection) {
      const tasksAD = Alpine.$data(tasksSection);
      if (tasksAD) {
        let tasks = response.tasks || [];

        // Always update tasks to ensure state changes are reflected
        if (tasks.length > 0) {
          // Sort the tasks by creation time
          const sortedTasks = [...tasks].sort(
            (a, b) => (b.created_at || 0) - (a.created_at || 0)
          );

          // Assign the sorted tasks to the Alpine data
          tasksAD.tasks = sortedTasks;
        } else {
          // Make sure to use a new empty array instance
          tasksAD.tasks = [];
        }
      }
    }

    // Make sure the active context is properly selected in both lists
    if (context) {
      // Update selection in the active tab
      const activeTab = localStorage.getItem("activeTab") || "chats";

      if (activeTab === "chats" && chatsAD) {
        chatsAD.selected = context;
        localStorage.setItem("lastSelectedChat", context);

        // Check if this context exists in the chats list
        const contextExists = contexts.some((ctx) => ctx.id === context);

        // If it doesn't exist in the chats list but we're in chats tab, try to select the first chat
        if (!contextExists && contexts.length > 0) {
          // Check if the current context is empty before creating a new one
          // If there's already a current context and we're just updating UI, don't automatically
          // create a new context by calling setContext
          const firstChatId = contexts[0].id;

          // Only create a new context if we're not currently in an existing context
          // This helps prevent duplicate contexts when switching tabs
          setContext(firstChatId);
          chatsAD.selected = firstChatId;
          localStorage.setItem("lastSelectedChat", firstChatId);
        }
      } else if (activeTab === "tasks" && tasksSection) {
        const tasksAD = Alpine.$data(tasksSection);
        tasksAD.selected = context;
        localStorage.setItem("lastSelectedTask", context);

        // Check if this context exists in the tasks list
        const taskExists = response.tasks?.some((task) => task.id === context);

        // If it doesn't exist in the tasks list but we're in tasks tab, try to select the first task
        if (!taskExists && response.tasks?.length > 0) {
          const firstTaskId = response.tasks[0].id;
          setContext(firstTaskId);
          tasksAD.selected = firstTaskId;
          localStorage.setItem("lastSelectedTask", firstTaskId);
        }
      }
    } else if (
      response.tasks &&
      response.tasks.length > 0 &&
      localStorage.getItem("activeTab") === "tasks"
    ) {
      // If we're in tasks tab with no selection but have tasks, select the first one
      const firstTaskId = response.tasks[0].id;
      setContext(firstTaskId);
      if (tasksSection) {
        const tasksAD = Alpine.$data(tasksSection);
        tasksAD.selected = firstTaskId;
        localStorage.setItem("lastSelectedTask", firstTaskId);
      }
    } else if (
      contexts.length > 0 &&
      localStorage.getItem("activeTab") === "chats" &&
      chatsAD
    ) {
      // If we're in chats tab with no selection but have chats, select the first one
      const firstChatId = contexts[0].id;

      // Only set context if we don't already have one to avoid duplicates
      if (!context) {
        setContext(firstChatId);
        chatsAD.selected = firstChatId;
        localStorage.setItem("lastSelectedChat", firstChatId);
      }
    }

    lastLogVersion = response.log_version;
    lastLogGuid = response.log_guid;
  } catch (error) {
    console.error("Error:", error);
    setConnectionStatus(false);
  }

  return updated;
}
|
| 523 |
+
|
| 524 |
+
/** Forward newly-rendered log lines to TTS when speech is enabled. */
function afterMessagesUpdate(logs) {
  if (localStorage.getItem("speech") == "true") {
    speakMessages(logs);
  }
}

/**
 * Speak the most recent speakable log line in `logs`: either a
 * "response" message, or an agent headline for a non-response tool call.
 * Scans from newest to oldest and stops at the first match.
 */
function speakMessages(logs) {
  // One-shot suppression, set by setContext when switching chats.
  if (skipOneSpeech) {
    skipOneSpeech = false;
    return;
  }
  // log.no, log.type, log.heading, log.content
  for (let i = logs.length - 1; i >= 0; i--) {
    const log = logs[i];

    // if already spoken, end
    // if(log.no < lastSpokenNo) break;

    // finished response
    if (log.type == "response") {
      // lastSpokenNo = log.no;
      speechStore.speakStream(
        getChatBasedId(log.no),
        log.content,
        log.kvps?.finished
      );
      return;

      // finished LLM headline, not response
    } else if (
      log.type == "agent" &&
      log.kvps &&
      log.kvps.headline &&
      log.kvps.tool_args &&
      log.kvps.tool_name != "response"
    ) {
      // lastSpokenNo = log.no;
      speechStore.speakStream(getChatBasedId(log.no), log.kvps.headline, true);
      return;
    }
  }
}
|
| 566 |
+
|
| 567 |
+
/** Update the progress-bar text and toggle its "active" shimmer effect. */
function updateProgress(progress, active) {
  if (!progress) progress = "";

  if (active) {
    addClassToElement(progressBar, "shiny-text");
  } else {
    removeClassFromElement(progressBar, "shiny-text");
  }

  progress = msgs.convertIcons(progress);

  // Skip the DOM write when nothing changed.
  if (progressBar.innerHTML != progress) {
    progressBar.innerHTML = progress;
  }
}
|
| 582 |
+
|
| 583 |
+
/** Ask the backend to pause/resume the agent in the current context. */
globalThis.pauseAgent = async function (paused) {
  try {
    await sendJsonData("/pause", { paused: paused, context });
  } catch (e) {
    globalThis.toastFetchError("Error pausing agent", e);
  }
};
|
| 590 |
+
|
| 591 |
+
/** Reset a chat on the backend (the current one when `ctxid` is null). */
globalThis.resetChat = async function (ctxid = null) {
  try {
    const target = ctxid === null ? context : ctxid;
    await sendJsonData("/chat_reset", { context: target });
    resetCounter++; // invalidates chat-based ids (see getChatBasedId)
    if (ctxid === null) updateAfterScroll();
  } catch (e) {
    globalThis.toastFetchError("Error resetting chat", e);
  }
};
|
| 602 |
+
|
| 603 |
+
/** Switch to a brand-new (empty) chat context. */
globalThis.newChat = async function () {
  try {
    setContext(generateGUID());
    updateAfterScroll();
  } catch (e) {
    globalThis.toastFetchError("Error creating new chat", e);
  }
};
|
| 611 |
+
|
| 612 |
+
/**
 * Delete chat `id` on the backend and remove it from the sidebar list,
 * first moving the active selection away from it when necessary.
 */
globalThis.killChat = async function (id) {
  if (!id) {
    console.error("No chat ID provided for deletion");
    return;
  }

  console.log("Deleting chat with ID:", id);

  try {
    const chatsAD = Alpine.$data(chatsSection);
    console.log(
      "Current contexts before deletion:",
      JSON.stringify(chatsAD.contexts.map((c) => ({ id: c.id, name: c.name })))
    );

    // switch to another context if deleting current
    switchFromContext(id);

    // Delete the chat on the server
    await sendJsonData("/chat_remove", { context: id });

    // Update the UI manually to ensure the correct chat is removed
    // Deep clone the contexts array to prevent reference issues
    const updatedContexts = chatsAD.contexts.filter((ctx) => ctx.id !== id);
    console.log(
      "Updated contexts after deletion:",
      JSON.stringify(updatedContexts.map((c) => ({ id: c.id, name: c.name })))
    );

    // Force UI update by creating a new array
    chatsAD.contexts = [...updatedContexts];

    updateAfterScroll();

    justToast("Chat deleted successfully", "success", 1000, "chat-removal");
  } catch (e) {
    console.error("Error deleting chat:", e);
    globalThis.toastFetchError("Error deleting chat", e);
  }
};
|
| 652 |
+
|
| 653 |
+
/**
 * If `id` is the active context, move the selection to the first other
 * chat in the sidebar — or to a fresh empty context when none remain.
 * No-op when `id` is not the active context.
 */
export function switchFromContext(id) {
  if (context !== id) return;

  const chatsAD = Alpine.$data(chatsSection);
  const alternateChat = chatsAD.contexts.find((ctx) => ctx.id !== id) || null;

  if (alternateChat) {
    setContext(alternateChat.id);
  } else {
    // No other chats left — start a new empty context.
    setContext(generateGUID());
  }
}
|
| 675 |
+
|
| 676 |
+
// Function to ensure proper UI state when switching contexts
|
| 677 |
+
/**
 * Make the active sidebar tab (chats vs. tasks) match the kind of
 * `contextId` being selected. A context is treated as a task when it
 * appears in the Alpine tasks list. Returns true when a tab switch was
 * performed (the caller then skips its normal selection path).
 */
function ensureProperTabSelection(contextId) {
  // Get current active tab
  const activeTab = localStorage.getItem("activeTab") || "chats";

  // First attempt to determine if this is a task or chat based on the task list
  const tasksSection = document.getElementById("tasks-section");
  let isTask = false;

  if (tasksSection) {
    const tasksAD = Alpine.$data(tasksSection);
    if (tasksAD && tasksAD.tasks) {
      isTask = tasksAD.tasks.some((task) => task.id === contextId);
    }
  }

  // If we're selecting a task but are in the chats tab, switch to tasks tab
  if (isTask && activeTab === "chats") {
    // Store this as the last selected task before switching
    localStorage.setItem("lastSelectedTask", contextId);
    activateTab("tasks");
    return true;
  }

  // If we're selecting a chat but are in the tasks tab, switch to chats tab
  if (!isTask && activeTab === "tasks") {
    // Store this as the last selected chat before switching
    localStorage.setItem("lastSelectedChat", contextId);
    activateTab("chats");
    return true;
  }

  return false;
}
|
| 710 |
+
|
| 711 |
+
/**
 * Select chat/task `id` from the sidebar: switch tabs if needed, make it
 * the active context, sync both Alpine selection states, persist the
 * choice, and trigger an immediate poll to load its content.
 */
globalThis.selectChat = async function (id) {
  if (id === context) return; //already selected

  // Check if we need to switch tabs based on the context type
  const tabSwitched = ensureProperTabSelection(id);

  // If we didn't switch tabs, proceed with normal selection
  if (!tabSwitched) {
    // Switch to the new context - this will clear chat history and reset tracking variables
    setContext(id);

    // Update both contexts and tasks lists to reflect the selected item
    const chatsAD = Alpine.$data(chatsSection);
    const tasksSection = document.getElementById("tasks-section");
    if (tasksSection) {
      const tasksAD = Alpine.$data(tasksSection);
      tasksAD.selected = id;
    }
    chatsAD.selected = id;

    // Store this selection in the appropriate localStorage key
    const activeTab = localStorage.getItem("activeTab") || "chats";
    if (activeTab === "chats") {
      localStorage.setItem("lastSelectedChat", id);
    } else if (activeTab === "tasks") {
      localStorage.setItem("lastSelectedTask", id);
    }

    // Trigger an immediate poll to fetch content
    poll();
  }

  updateAfterScroll();
};
|
| 745 |
+
|
| 746 |
+
/**
 * Make `id` the active context. Resets incremental-poll tracking so the
 * next poll refetches from scratch, stops TTS, clears the rendered chat
 * history, and syncs the Alpine selection state in both sidebar lists.
 * No-op when `id` is already active.
 */
export const setContext = function (id) {
  if (id == context) return;
  context = id;
  // Always reset the log tracking variables when switching contexts
  // This ensures we get fresh data from the backend
  lastLogGuid = "";
  lastLogVersion = 0;
  lastSpokenNo = 0;

  // Stop speech when switching chats
  speechStore.stopAudio();

  // Clear the chat history immediately to avoid showing stale content
  chatHistory.innerHTML = "";

  // Update both selected states
  if (globalThis.Alpine) {
    if (chatsSection) {
      const chatsAD = Alpine.$data(chatsSection);
      if (chatsAD) chatsAD.selected = id;
    }
    if (tasksSection) {
      const tasksAD = Alpine.$data(tasksSection);
      if (tasksAD) tasksAD.selected = id;
    }
  }

  //skip one speech if enabled when switching context
  if (localStorage.getItem("speech") == "true") skipOneSpeech = true;
};
|
| 776 |
+
|
| 777 |
+
/** Id of the currently selected chat/task context. */
export const getContext = function () {
  return context;
};

/**
 * Compose an id unique to the current context and reset generation,
 * so e.g. speech streams restart after a chat reset.
 */
export const getChatBasedId = function (id) {
  return [context, resetCounter, id].join("-");
};
|
| 784 |
+
|
| 785 |
+
/** Enable/disable auto-follow of streaming chat output. */
globalThis.toggleAutoScroll = async function (_autoScroll) {
  autoScroll = _autoScroll;
};

/** Show or hide raw JSON message blocks. */
globalThis.toggleJson = async function (showJson) {
  const display = showJson ? "block" : "none";
  css.toggleCssProperty(".msg-json", "display", display);
};

/** Show or hide agent "thoughts" messages. */
globalThis.toggleThoughts = async function (showThoughts) {
  const display = showThoughts ? undefined : "none";
  css.toggleCssProperty(".msg-thoughts", "display", display);
};

/** Show or hide utility messages. */
globalThis.toggleUtils = async function (showUtils) {
  const display = showUtils ? undefined : "none";
  css.toggleCssProperty(".message-util", "display", display);
};
|
| 808 |
+
|
| 809 |
+
/** Switch the body between dark/light mode classes and persist the choice. */
globalThis.toggleDarkMode = function (isDark) {
  const addClass = isDark ? "dark-mode" : "light-mode";
  const removeClass = isDark ? "light-mode" : "dark-mode";
  document.body.classList.remove(removeClass);
  document.body.classList.add(addClass);
  console.log("Dark mode:", isDark);
  localStorage.setItem("darkMode", isDark);
};

/** Persist the speech (TTS) preference; stop playing audio when disabling. */
globalThis.toggleSpeech = function (isOn) {
  console.log("Speech:", isOn);
  localStorage.setItem("speech", isOn);
  if (!isOn) speechStore.stopAudio();
};
|
| 826 |
+
|
| 827 |
+
/** Nudge the agent in the current context (e.g. when it appears stuck). */
globalThis.nudge = async function () {
  try {
    await sendJsonData("/nudge", { ctxid: getContext() });
  } catch (e) {
    toastFetchError("Error nudging agent", e);
  }
};
|
| 834 |
+
|
| 835 |
+
/**
 * Restart the backend. The /restart request is expected to fail (the
 * server goes down while handling it — presumably; confirm against the
 * backend handler), so the real restart flow lives in the catch block:
 * show a "Restarting..." toast, then poll /health every 250ms until the
 * server answers again or ~60s elapse.
 */
globalThis.restart = async function () {
  try {
    if (!getConnectionStatus()) {
      await toastFrontendError(
        "Backend disconnected, cannot restart.",
        "Restart Error"
      );
      return;
    }
    // First try to initiate restart
    const resp = await sendJsonData("/restart", {});
  } catch (e) {
    // Show restarting message with no timeout and restart group
    await toastFrontendInfo("Restarting...", "System Restart", 9999, "restart");

    let retries = 0;
    const maxRetries = 240; // Maximum number of retries (60 seconds with 250ms interval)

    while (retries < maxRetries) {
      try {
        const resp = await sendJsonData("/health", {});
        // Server is back up, show success message that replaces the restarting message
        await new Promise((resolve) => setTimeout(resolve, 250));
        await toastFrontendSuccess("Restarted", "System Restart", 5, "restart");
        return;
      } catch (e) {
        // Server still down, keep waiting
        retries++;
        await new Promise((resolve) => setTimeout(resolve, 250));
      }
    }

    // If we get here, restart failed or took too long
    await toastFrontendError(
      "Restart timed out or failed",
      "Restart Error",
      8,
      "restart"
    );
  }
};
|
| 876 |
+
|
| 877 |
+
// Apply the persisted theme on startup. Dark mode is the default: anything
// other than the stored string "false" (including no stored value) enables it.
document.addEventListener("DOMContentLoaded", () => {
  const isDarkMode = localStorage.getItem("darkMode") !== "false";
  toggleDarkMode(isDarkMode);
});
|
| 882 |
+
|
| 883 |
+
// Let the user pick chat JSON files, upload them to the backend, and switch
// to the first imported chat context.
globalThis.loadChats = async function () {
  try {
    const fileContents = await readJsonFiles();
    const response = await sendJsonData("/chat_load", { chats: fileContents });

    if (response) {
      setContext(response.ctxids[0]);
      toast("Chats loaded.", "success");
    } else {
      toast("No response returned.", "error");
    }
  } catch (e) {
    toastFetchError("Error loading chats", e);
  }
};
|
| 906 |
+
|
| 907 |
+
// Export the current chat context from the backend and download it as JSON.
globalThis.saveChat = async function () {
  try {
    const response = await sendJsonData("/chat_export", { ctxid: context });

    if (response) {
      downloadFile(response.ctxid + ".json", response.content);
      toast("Chat file downloaded.", "success");
    } else {
      toast("No response returned.", "error");
    }
  } catch (e) {
    toastFetchError("Error saving chat", e);
  }
};
|
| 929 |
+
|
| 930 |
+
// Trigger a browser download of `content` as a JSON file named `filename`.
function downloadFile(filename, content) {
  const blob = new Blob([content], { type: "application/json" });
  const url = URL.createObjectURL(blob);

  const anchor = document.createElement("a");
  anchor.href = url;
  anchor.download = filename;
  anchor.click();

  // Revoke on the next tick so the click has started the download first.
  setTimeout(() => URL.revokeObjectURL(url), 0);
}
|
| 952 |
+
|
| 953 |
+
// Open a file picker restricted to .json files and resolve with the text
// contents of every selected file (empty array if nothing was chosen).
function readJsonFiles() {
  return new Promise((resolve, reject) => {
    const input = document.createElement("input");
    input.type = "file";
    input.accept = ".json"; // Only accept JSON files
    input.multiple = true; // Allow multiple file selection

    // Open the file dialog.
    input.click();

    input.onchange = async () => {
      const files = input.files;
      if (!files.length) {
        resolve([]);
        return;
      }

      // Read one file to a string via FileReader.
      const readOne = (file) =>
        new Promise((fileResolve, fileReject) => {
          const reader = new FileReader();
          reader.onload = () => fileResolve(reader.result);
          reader.onerror = fileReject;
          reader.readAsText(file);
        });

      try {
        resolve(await Promise.all(Array.from(files).map(readOne)));
      } catch (error) {
        reject(error); // Any single read failure rejects the whole batch.
      }
    };
  });
}
|
| 991 |
+
|
| 992 |
+
// Add `className` to the element's class list.
function addClassToElement(element, className) {
  const { classList } = element;
  classList.add(className);
}
|
| 995 |
+
|
| 996 |
+
// Remove `className` from the element's class list.
function removeClassFromElement(element, className) {
  const { classList } = element;
  classList.remove(className);
}
|
| 999 |
+
|
| 1000 |
+
// Show a transient toast only (no persistent notification entry).
// `timeout` is in milliseconds; the notification store expects seconds.
function justToast(text, type = "info", timeout = 5000, group = "") {
  const seconds = timeout / 1000;
  notificationStore.addFrontendToastOnly(type, text, "", seconds, group);
}
|
| 1009 |
+
|
| 1010 |
+
|
| 1011 |
+
// Legacy toast entry point, routed to the new notification system.
// `timeout` is in milliseconds; converted to seconds (minimum 1s).
function toast(text, type = "info", timeout = 5000) {
  const display_time = Math.max(timeout / 1000, 1); // Minimum 1 second
  const kind = type.toLowerCase();

  if (kind === "error") {
    return notificationStore.frontendError(text, "Error", display_time);
  }
  if (kind === "success") {
    return notificationStore.frontendInfo(text, "Success", display_time);
  }
  if (kind === "warning") {
    return notificationStore.frontendWarning(text, "Warning", display_time);
  }
  // "info" and any unknown type fall through to an info toast.
  return notificationStore.frontendInfo(text, "Info", display_time);
}
globalThis.toast = toast;
|
| 1030 |
+
|
| 1031 |
+
// OLD: hideToast function removed - now using new notification system
|
| 1032 |
+
|
| 1033 |
+
// Mirror the "user is at the bottom" state into the auto-scroll switch's
// Alpine data, if Alpine and the switch element are available.
function scrollChanged(isAtBottom) {
  if (!globalThis.Alpine || !autoScrollSwitch) return;
  const switchData = Alpine.$data(autoScrollSwitch);
  if (switchData) {
    switchData.autoScroll = isAtBottom;
  }
}
|
| 1042 |
+
|
| 1043 |
+
// After a scroll event, decide whether the view is (near) the bottom of the
// chat history — within a small pixel tolerance — and report it.
function updateAfterScroll() {
  const tolerancePx = 10;
  const historyEl = document.getElementById("chat-history");
  const distanceFromBottom =
    historyEl.scrollHeight - historyEl.scrollTop - historyEl.clientHeight;

  scrollChanged(distanceFromBottom <= tolerancePx);
}
|
| 1054 |
+
|
| 1055 |
+
// Keep the auto-scroll switch in sync while the user scrolls the history.
chatHistory.addEventListener("scroll", updateAfterScroll);

// Re-fit the chat input's height whenever its content changes.
chatInput.addEventListener("input", adjustTextareaHeight);
|
| 1058 |
+
|
| 1059 |
+
// setInterval(poll, 250);
|
| 1060 |
+
|
| 1061 |
+
// Adaptive polling loop: after any poll that reports activity, poll fast
// (25ms) for a burst of ~100 ticks, then fall back to a relaxed 250ms cadence.
async function startPolling() {
  const FAST_MS = 25;
  const SLOW_MS = 250;
  const BURST_TICKS = 100;
  let burstRemaining = 0;

  async function tick() {
    let delay = SLOW_MS;

    try {
      const hadActivity = await poll();
      if (hadActivity) burstRemaining = BURST_TICKS; // restart the fast burst
      if (burstRemaining > 0) burstRemaining--; // consume one burst tick
      delay = burstRemaining > 0 ? FAST_MS : SLOW_MS;
    } catch (error) {
      console.error("Error:", error);
    }

    // Schedule the next tick at the chosen cadence.
    setTimeout(tick, delay);
  }

  tick();
}
|
| 1085 |
+
|
| 1086 |
+
// Start the backend polling loop once the DOM is ready.
document.addEventListener("DOMContentLoaded", startPolling);
|
| 1087 |
+
|
| 1088 |
+
// Setup event handlers once the DOM is fully loaded
|
| 1089 |
+
// Wire up sidebar, tab handlers, and the initially active tab once the DOM
// is fully loaded.
document.addEventListener("DOMContentLoaded", function () {
  setupSidebarToggle();
  setupTabs();
  initializeActiveTab();
});
|
| 1094 |
+
|
| 1095 |
+
// Setup tabs functionality
|
| 1096 |
+
// Attach click handlers to the Chats/Tasks sidebar tabs. If the elements
// are not in the DOM yet, retry shortly.
function setupTabs() {
  const chatsTab = document.getElementById("chats-tab");
  const tasksTab = document.getElementById("tasks-tab");

  if (!chatsTab || !tasksTab) {
    console.error("Tab elements not found");
    setTimeout(setupTabs, 100); // Retry setup
    return;
  }

  chatsTab.addEventListener("click", () => activateTab("chats"));
  tasksTab.addEventListener("click", () => activateTab("tasks"));
}
|
| 1113 |
+
|
| 1114 |
+
// Switch the sidebar between the "chats" and "tasks" tabs.
//
// Side effects, in order: saves the outgoing tab's selected context to
// localStorage, deactivates both tabs/sections, persists the new active tab,
// shows the chosen section, restores that tab's last selection via
// setContext() when it is still valid, and finally requests a poll.
// The statement order matters: the old selection must be saved before
// "activeTab" is overwritten.
function activateTab(tabName) {
  const chatsTab = document.getElementById("chats-tab");
  const tasksTab = document.getElementById("tasks-tab");
  const chatsSection = document.getElementById("chats-section");
  const tasksSection = document.getElementById("tasks-section");

  // Get current context to preserve before switching
  const currentContext = context;

  // Store the current selection for the active tab before switching
  const previousTab = localStorage.getItem("activeTab");
  if (previousTab === "chats") {
    localStorage.setItem("lastSelectedChat", currentContext);
  } else if (previousTab === "tasks") {
    localStorage.setItem("lastSelectedTask", currentContext);
  }

  // Reset all tabs and sections
  chatsTab.classList.remove("active");
  tasksTab.classList.remove("active");
  chatsSection.style.display = "none";
  tasksSection.style.display = "none";

  // Remember the last active tab in localStorage
  localStorage.setItem("activeTab", tabName);

  // Activate selected tab and section
  if (tabName === "chats") {
    chatsTab.classList.add("active");
    chatsSection.style.display = "";

    // Get the available contexts from Alpine.js data
    const chatsAD = globalThis.Alpine ? Alpine.$data(chatsSection) : null;
    const availableContexts = chatsAD?.contexts || [];

    // Restore previous chat selection
    const lastSelectedChat = localStorage.getItem("lastSelectedChat");

    // Only switch if:
    // 1. lastSelectedChat exists AND
    // 2. It's different from current context AND
    // 3. The context actually exists in our contexts list OR there are no contexts yet
    if (
      lastSelectedChat &&
      lastSelectedChat !== currentContext &&
      (availableContexts.some((ctx) => ctx.id === lastSelectedChat) ||
        availableContexts.length === 0)
    ) {
      setContext(lastSelectedChat);
    }
  } else if (tabName === "tasks") {
    tasksTab.classList.add("active");
    tasksSection.style.display = "flex";
    tasksSection.style.flexDirection = "column";

    // Get the available tasks from Alpine.js data
    const tasksAD = globalThis.Alpine ? Alpine.$data(tasksSection) : null;
    const availableTasks = tasksAD?.tasks || [];

    // Restore previous task selection
    const lastSelectedTask = localStorage.getItem("lastSelectedTask");

    // Only switch if:
    // 1. lastSelectedTask exists AND
    // 2. It's different from current context AND
    // 3. The task actually exists in our tasks list
    if (
      lastSelectedTask &&
      lastSelectedTask !== currentContext &&
      availableTasks.some((task) => task.id === lastSelectedTask)
    ) {
      setContext(lastSelectedTask);
    }
  }

  // Request a poll update
  poll();
}
|
| 1192 |
+
|
| 1193 |
+
// Add function to initialize active tab and selections from localStorage
|
| 1194 |
+
// Restore the last active sidebar tab on startup, seeding the selection
// storage slots first so later reads never return null.
function initializeActiveTab() {
  for (const key of ["lastSelectedChat", "lastSelectedTask"]) {
    if (!localStorage.getItem(key)) {
      localStorage.setItem(key, "");
    }
  }

  activateTab(localStorage.getItem("activeTab") || "chats");
}
|
| 1206 |
+
|
| 1207 |
+
/*
|
| 1208 |
+
* A0 Chat UI
|
| 1209 |
+
*
|
| 1210 |
+
* Tasks tab functionality:
|
| 1211 |
+
* - Tasks are displayed in the Tasks tab with the same mechanics as chats
|
| 1212 |
+
* - Both lists are sorted by creation time (newest first)
|
| 1213 |
+
* - Selection state is preserved across tab switches
|
| 1214 |
+
* - The active tab is remembered across sessions
|
| 1215 |
+
* - Tasks use the same context system as chats for communication with the backend
|
| 1216 |
+
* - Future support for renaming and deletion will be implemented later
|
| 1217 |
+
*/
|
| 1218 |
+
|
| 1219 |
+
// Open the scheduler detail view for a specific task
|
| 1220 |
+
// Open the scheduler's detail view for a specific task.
//
// Implementation note: this drives the UI the same way a user would —
// programmatically clicking the settings button (so all of its init logic
// runs), then switching to the scheduler tab, then showing the task detail.
// The nested setTimeout calls give the modal (25ms) and the scheduler tab
// (50ms) time to render before their Alpine data is used; the ordering and
// delays are load-bearing.
function openTaskDetail(taskId) {
  // Wait for Alpine.js to be fully loaded
  if (globalThis.Alpine) {
    // Get the settings modal button and click it to ensure all init logic happens
    const settingsButton = document.getElementById("settings");
    if (settingsButton) {
      // Programmatically click the settings button
      settingsButton.click();

      // Now get a reference to the modal element
      const modalEl = document.getElementById("settingsModal");
      if (!modalEl) {
        console.error("Settings modal element not found after clicking button");
        return;
      }

      // Get the Alpine.js data for the modal
      const modalData = globalThis.Alpine ? Alpine.$data(modalEl) : null;

      // Use a timeout to ensure the modal is fully rendered
      setTimeout(() => {
        // Switch to the scheduler tab first
        modalData.switchTab("scheduler");

        // Use another timeout to ensure the scheduler component is initialized
        setTimeout(() => {
          // Get the scheduler component
          const schedulerComponent = document.querySelector(
            '[x-data="schedulerSettings"]'
          );
          if (!schedulerComponent) {
            console.error("Scheduler component not found");
            return;
          }

          // Get the Alpine.js data for the scheduler component
          const schedulerData = globalThis.Alpine
            ? Alpine.$data(schedulerComponent)
            : null;

          // Show the task detail view for the specific task
          schedulerData.showTaskDetail(taskId);

          console.log("Task detail view opened for task:", taskId);
        }, 50); // Give time for the scheduler tab to initialize
      }, 25); // Give time for the modal to render
    } else {
      console.error("Settings button not found");
    }
  } else {
    console.error("Alpine.js not loaded");
  }
}

// Make the function available globally
globalThis.openTaskDetail = openTaskDetail;
|