from __future__ import annotations

# Project by Nymbo

import json
import os
import sys
import threading
import time
from datetime import datetime, timedelta
from typing import Any

import gradio as gr


class RateLimiter:
    """Best-effort in-process rate limiter for HTTP-heavy tools."""

    def __init__(self, requests_per_minute: int = 30) -> None:
        self.requests_per_minute = requests_per_minute
        self._requests: list[datetime] = []
        self._lock = threading.Lock()

    def acquire(self) -> None:
        now = datetime.now()
        with self._lock:
            self._requests = [req for req in self._requests if now - req < timedelta(minutes=1)]
            if len(self._requests) >= self.requests_per_minute:
                wait_time = 60 - (now - self._requests[0]).total_seconds()
                if wait_time > 0:
                    time.sleep(max(1, wait_time))
            self._requests.append(now)


_search_rate_limiter = RateLimiter(requests_per_minute=20)
_fetch_rate_limiter = RateLimiter(requests_per_minute=25)


def _truncate_for_log(value: Any, limit: int = 500) -> str:
    if not isinstance(value, str):
        value = str(value)
    if len(value) <= limit:
        return value
    return value[: limit - 1] + "…"


def _serialize_input(val: Any) -> Any:
    try:
        if isinstance(val, (str, int, float, bool)) or val is None:
            return val
        if isinstance(val, (list, tuple)):
            return [_serialize_input(v) for v in list(val)[:10]] + (["…"] if len(val) > 10 else [])
        if isinstance(val, dict):
            out: dict[str, Any] = {}
            for i, (k, v) in enumerate(val.items()):
                if i >= 12:
                    out["…"] = "…"
                    break
                out[str(k)] = _serialize_input(v)
            return out
        return repr(val)[:120]
    except Exception:
        return ""


def _log_call_start(func_name: str, **kwargs: Any) -> None:
    try:
        compact = {k: _serialize_input(v) for k, v in kwargs.items()}
        # Use sys.__stdout__ to avoid capturing logs in redirected output
        print(f"[TOOL CALL] {func_name} inputs: {json.dumps(compact, ensure_ascii=False)[:800]}", flush=True, file=sys.__stdout__)
    except Exception as exc:
        print(f"[TOOL CALL] {func_name} (failed to log inputs: {exc})", flush=True, file=sys.__stdout__)


def _log_call_end(func_name: str, output_desc: str) -> None:
    try:
        # Use sys.__stdout__ to avoid capturing logs in redirected output
        print(f"[TOOL RESULT] {func_name} output: {output_desc}", flush=True, file=sys.__stdout__)
    except Exception as exc:
        print(f"[TOOL RESULT] {func_name} (failed to log output: {exc})", flush=True, file=sys.__stdout__)
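

# Illustrative sketch (assumption, not part of the original app): one way a tool
# module might wrap an outbound HTTP call with the limiter and logging helpers
# above. The function name and the use of urllib are hypothetical choices for
# the example; nothing in this app calls this function.
def _example_rate_limited_fetch(url: str) -> str:
    """Demonstrate combining a RateLimiter with the logging helpers."""
    from urllib.request import urlopen  # stdlib; imported locally to keep the example self-contained

    _fetch_rate_limiter.acquire()  # wait for a free slot in the per-minute window
    _log_call_start("_example_rate_limited_fetch", url=url)
    with urlopen(url, timeout=30) as resp:
        body = resp.read().decode("utf-8", errors="replace")
    _log_call_end("_example_rate_limited_fetch", _truncate_for_log(body))
    return body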


# Ensure Tools modules can import 'app' when this file is executed as a script
# (their code does `from app import ...`).
sys.modules.setdefault("app", sys.modules[__name__])

# Import per-tool interface builders from the Tools package
from Modules.Web_Fetch import build_interface as build_fetch_interface
from Modules.Web_Search import build_interface as build_search_interface
from Modules.Agent_Terminal import build_interface as build_agent_terminal_interface
from Modules.Code_Interpreter import build_interface as build_code_interface
from Modules.Memory_Manager import build_interface as build_memory_interface
from Modules.Generate_Speech import build_interface as build_speech_interface
from Modules.Generate_Image import build_interface as build_image_interface
from Modules.Generate_Video import build_interface as build_video_interface
from Modules.Deep_Research import build_interface as build_research_interface
from Modules.File_System import build_interface as build_fs_interface
from Modules.Obsidian_Vault import build_interface as build_obsidian_interface
from Modules.Shell_Command import build_interface as build_shell_interface

# Optional environment flags used to conditionally show API schemas (unchanged behavior)
HF_IMAGE_TOKEN = bool(os.getenv("HF_READ_TOKEN"))
HF_VIDEO_TOKEN = bool(os.getenv("HF_READ_TOKEN") or os.getenv("HF_TOKEN"))
HF_TEXTGEN_TOKEN = bool(os.getenv("HF_READ_TOKEN") or os.getenv("HF_TOKEN"))

# Load CSS from external file
_css_path = os.path.join(os.path.dirname(__file__), "styles.css")
with open(_css_path, "r", encoding="utf-8") as _css_file:
    CSS_STYLES = _css_file.read()

# Build each tab interface using modular builders
fetch_interface = build_fetch_interface()
web_search_interface = build_search_interface()
agent_terminal_interface = build_agent_terminal_interface()
code_interface = build_code_interface()
memory_interface = build_memory_interface()
kokoro_interface = build_speech_interface()
image_generation_interface = build_image_interface()
video_generation_interface = build_video_interface()
deep_research_interface = build_research_interface()
fs_interface = build_fs_interface()
shell_interface = build_shell_interface()
obsidian_interface = build_obsidian_interface()

_interfaces = [
    agent_terminal_interface,
    fetch_interface,
    web_search_interface,
    code_interface,
    shell_interface,
    fs_interface,
    obsidian_interface,
    memory_interface,
    kokoro_interface,
    image_generation_interface,
    video_generation_interface,
    deep_research_interface,
]

_tab_names = [
    "Agent Terminal",
    "Web Fetch",
    "Web Search",
    "Code Interpreter",
    "Shell Command",
    "File System",
    "Obsidian Vault",
    "Memory Manager",
    "Generate Speech",
    "Generate Image",
    "Generate Video",
    "Deep Research",
]

# Theme and custom CSS are applied at construction time
with gr.Blocks(title="Nymbo/Tools MCP", theme="Nymbo/Nymbo_Theme", css=CSS_STYLES) as demo:
    with gr.Sidebar(width=300, elem_classes="app-sidebar"):
        gr.Markdown(
            "## Nymbo/Tools MCP\n"
            "General purpose tools useful for any agent.\n\n"
            "https://nymbo.net/gradio_api/mcp/\n\n"
            "Test with Nymbot"
        )
        with gr.Accordion("Information", open=False):
            gr.HTML(
                """

                <h3>Connecting from an MCP Client</h3>
                <p>This Space also runs as a Model Context Protocol (MCP) server. Point your client to:</p>
                <p><code>https://nymbo.net/gradio_api/mcp/</code></p>
                <p>Example client configuration:</p>
                <pre><code>{
  "mcpServers": {
    "nymbo-tools": {
      "url": "https://nymbo.net/gradio_api/mcp/"
    }
  }
}</code></pre>
                <p>Run the following commands in sequence to start the server locally:</p>
                <pre><code>git clone https://huggingface.co/spaces/Nymbo/Tools
cd Tools
python -m venv env
source env/bin/activate
pip install -r requirements.txt
python app.py</code></pre>
                <h3>Enable Image Gen, Video Gen, and Deep Research</h3>
                <p>The Generate_Image, Generate_Video, and Deep_Research tools require an HF_READ_TOKEN set as a secret or environment variable.</p>
                <ul>
                  <li>Duplicate this Space and add an HF token with model read access.</li>
                  <li>Or run locally with HF_READ_TOKEN set in your environment (for example, export it in your shell before running <code>python app.py</code>).</li>
                </ul>
                <p>MCP clients can see these tools even without a token, but calls will fail until a valid token is provided.</p>

                <h3>Persistent Memories and Files</h3>
                <p>In this public demo, memories and files created with the Memory_Manager and File_System are stored in the Space's running container and are cleared when the Space restarts. Content is visible to everyone, so avoid personal data.</p>
                <p>When running locally, memories are saved to memories.json at the repo root for privacy, and files are saved to the Tools/Filesystem directory on disk.</p>
                <h3>Tool Notes &amp; Kokoro Voice Legend</h3>
                <p>No authentication required for:</p>
                <ul>
                  <li>Web_Fetch</li>
                  <li>Web_Search</li>
                  <li>Agent_Terminal</li>
                  <li>Code_Interpreter</li>
                  <li>Memory_Manager</li>
                  <li>Generate_Speech</li>
                  <li>File_System</li>
                  <li>Shell_Command</li>
                </ul>
                <h4>Kokoro voice prefixes</h4>
                <table>
                  <tr><th>Accent</th><th>Female</th><th>Male</th></tr>
                  <tr><td>American</td><td>af</td><td>am</td></tr>
                  <tr><td>British</td><td>bf</td><td>bm</td></tr>
                  <tr><td>European</td><td>ef</td><td>em</td></tr>
                  <tr><td>French</td><td>ff</td><td></td></tr>
                  <tr><td>Hindi</td><td>hf</td><td>hm</td></tr>
                  <tr><td>Italian</td><td>if</td><td>im</td></tr>
                  <tr><td>Japanese</td><td>jf</td><td>jm</td></tr>
                  <tr><td>Portuguese</td><td>pf</td><td>pm</td></tr>
                  <tr><td>Chinese</td><td>zf</td><td>zm</td></tr>
                </table>
                """
            )
        gr.Markdown("### Tools")
        tool_selector = gr.Radio(
            choices=_tab_names,
            value=_tab_names[0],
            label="Select Tool",
            show_label=False,
            container=False,
            elem_classes="sidebar-nav",
        )
    with gr.Tabs(elem_classes="hidden-tabs", selected=_tab_names[0]) as tool_tabs:
        for name, interface in zip(_tab_names, _interfaces):
            with gr.TabItem(label=name, id=name, elem_id=f"tab-{name}"):
                interface.render()
    # Use JavaScript to click the hidden tab button when the radio selection changes
    tool_selector.change(
        fn=None,
        inputs=tool_selector,
        outputs=None,
        js="(selected_tool) => { const buttons = document.querySelectorAll('.hidden-tabs button'); buttons.forEach(btn => { if (btn.innerText.trim() === selected_tool) { btn.click(); } }); }",
    )

if __name__ == "__main__":
    demo.launch(mcp_server=True, ssr_mode=False)