push changes

Files changed:
- .gitignore (+1, -0)
- __pycache__/tools.cpython-311.pyc (+0, -0)
- agent_tools.py (+215, -0)
- app.py (+114, -27)
- tools.py (+1, -1)
.gitignore  (ADDED)
@@ -0,0 +1 @@
+.env
__pycache__/tools.cpython-311.pyc  (ADDED)
Binary file (7.64 kB)
agent_tools.py  (ADDED)
@@ -0,0 +1,215 @@
+# %%
+from io import BytesIO
+import requests
+from PIL import Image as PILImage
+from transformers import BlipProcessor, BlipForConditionalGeneration
+from langchain_core.messages import AnyMessage, HumanMessage, AIMessage
+from huggingface_hub import list_models
+import random
+import pprint
+from langchain_community.tools import DuckDuckGoSearchRun
+from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
+from langgraph.prebuilt import tools_condition
+from langgraph.graph import START, StateGraph
+from IPython.display import Image, display
+
+from langgraph.prebuilt import ToolNode
+from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, SystemMessage
+from langgraph.graph.message import add_messages
+from typing import TypedDict, Annotated
+from langchain.tools import Tool
+from langchain_community.retrievers import BM25Retriever
+from langchain.docstore.document import Document
+import datasets
+from langchain_openai import ChatOpenAI
+from dotenv import load_dotenv
+import os
+import torch
+import base64
+
+# Load environment variables
+load_dotenv()
+
+# DEFINE HUB STAT TOOLS
+
+
+def get_hub_stats(author: str) -> str:
+    """Fetches the most downloaded model from a specific author on the Hugging Face Hub."""
+    try:
+        # List models from the specified author, sorted by downloads
+        models = list(list_models(
+            author=author, sort="downloads", direction=-1, limit=1))
+
+        if models:
+            model = models[0]
+            return f"The most downloaded model by {author} is {model.id} with {model.downloads:,} downloads."
+        else:
+            return f"No models found for author {author}."
+    except Exception as e:
+        return f"Error fetching models for {author}: {str(e)}"
+
+
+# Initialize the tool
+hub_stats_tool = Tool(
+    name="get_hub_stats",
+    func=get_hub_stats,
+    description="Search HuggingFace Hub for model statistics, downloads, and author information. Use this when asking about specific models, authors, or HuggingFace Hub data."
+)
+
+# DEFINE WEB SEARCH TOOLS
+web_search_tool = Tool(
+    name="search_tool",
+    func=DuckDuckGoSearchRun().run,
+    description="Search the general web for current information, news, and general knowledge. Use this for questions about companies, people, events, etc."
+)
+
+# REVERSE TOOLS
+
+
+def ReverseTextTool(text: str) -> str:
+    """Reverses the order of characters in a given text string."""
+    try:
+        return text[::-1]
+    except Exception as e:
+        return f"Error reversing text: {str(e)}"
+
+
+reverse_text_tool = Tool(
+    name="reverse_text_tool",
+    func=ReverseTextTool,
+    description="Reverses the order of characters in a given text string. Use this when you need to reverse text."
+)
+
+# DOWNLOAD A FILE
+
+
+def download_file(url: str) -> str:
+    """Downloads a file from a given URL and returns the local file path."""
+    try:
+        response = requests.get(url, timeout=30)
+        response.raise_for_status()
+
+        # Define save_path - extract filename from URL
+        filename = url.split(
+            '/')[-1] if url.split('/')[-1] else 'downloaded_file'
+        save_path = f"./{filename}"
+
+        with open(save_path, "wb") as f:
+            f.write(response.content)
+        return save_path
+    except Exception as e:
+        return f"Failed to download: {e}"
+
+
+download_file_tool = Tool(
+    name="download_file_tool",
+    func=download_file,
+    description="Downloads a file from a given URL and returns the local file path."
+)
+
+# DEFINE IMAGE RECOGNITION TOOLS
+
+
+def create_vision_llm():
+    """Creates a vision-capable LLM with proper error handling."""
+    try:
+        # Check if OpenAI API key is available
+        if not os.getenv("OPENAI_API_KEY"):
+            return None, "OpenAI API key not found. Please set OPENAI_API_KEY in your environment variables."
+
+        vision_llm = ChatOpenAI(model="gpt-4o")
+        return vision_llm, None
+    except Exception as e:
+        return None, f"Error creating vision LLM: {str(e)}"
+
+
+def image_recognition(img_path: str) -> str:
+    """Analyzes and describes the content of images using AI vision."""
+    try:
+        # Check if file exists
+        if not os.path.exists(img_path):
+            return f"Error: Image file not found at {img_path}"
+
+        # Create vision LLM
+        vision_llm, error = create_vision_llm()
+        if error:
+            return error
+
+        # Read image and encode as base64
+        with open(img_path, "rb") as image_file:
+            image_bytes = image_file.read()
+
+        image_base64 = base64.b64encode(image_bytes).decode("utf-8")
+
+        # Prepare the prompt including the base64 image data
+        message = [
+            HumanMessage(
+                content=[
+                    {
+                        "type": "text",
+                        "text": (
+                            "Describe the image or extract all the text from this image. "
+                            "Return only the description or extracted text, no explanations."
+                        ),
+                    },
+                    {
+                        "type": "image_url",
+                        "image_url": {
+                            "url": f"data:image/png;base64,{image_base64}"
+                        },
+                    },
+                ]
+            )
+        ]
+
+        # Call the vision-capable model
+        response = vision_llm.invoke(message)
+        return response.content.strip()
+
+    except Exception as e:
+        return f"Error analyzing image: {str(e)}"
+
+
+image_recognition_tool = Tool(
+    name="image_recognition_tool",
+    func=image_recognition,
+    description="Analyzes and describes the content of images using AI vision. Use this when you need to understand what's in an image."
+)
+
+# Test functions (commented out to avoid side effects)
+
+
+def test_tools():
+    """Test all tools to ensure they work properly."""
+    print("Testing Hub Stats Tool:")
+    print(hub_stats_tool.invoke("google"))
+    print("\n" + "="*50 + "\n")
+
+    print("Testing Web Search Tool:")
+    results = web_search_tool.invoke("what is the matrix?")
+    pp = pprint.PrettyPrinter()
+    pp.pprint(results)
+    print("\n" + "="*50 + "\n")
+
+    print("Testing Reverse Text Tool:")
+    results = reverse_text_tool.invoke("what is the matrix?")
+    print(results)
+    print("\n" + "="*50 + "\n")
+
+    print("Testing Download File Tool:")
+    test_url = "https://www.google.com"
+    results = download_file_tool.invoke(test_url)
+    print(results)
+    print("\n" + "="*50 + "\n")
+
+    print("Testing Image Recognition Tool:")
+    test_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/3/3a/Cat03.jpg/1200px-Cat03.jpg"
+    downloaded_file = download_file_tool.invoke(test_url)
+    if not downloaded_file.startswith("Failed"):
+        results = image_recognition_tool.invoke(downloaded_file)
+        print(results)
+    else:
+        print("Skipping image recognition test due to download failure")
+
+# Uncomment the line below to run tests
+# test_tools()
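For a quick local check of the new tools outside the Space, a couple of them can be called directly from a Python shell. This is a minimal sketch, not part of the commit; it assumes the repo's dependencies are installed locally and that agent_tools.py is importable.

# Sketch: call two of the new Tool wrappers directly with a plain string input.
from agent_tools import reverse_text_tool, hub_stats_tool

print(reverse_text_tool.invoke("what is the matrix?"))  # prints the reversed string
print(hub_stats_tool.invoke("google"))  # reports that author's most downloaded model

Single-input Tool wrappers accept a plain string via .invoke(), which is also how test_tools() exercises them.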
app.py  (CHANGED)
@@ -3,32 +3,111 @@ import gradio as gr
 import requests
 import inspect
 import pandas as pd
+from langchain_openai import ChatOpenAI
+from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, SystemMessage
+from langgraph.graph import START, StateGraph
+from langgraph.prebuilt import ToolNode, tools_condition
+from langgraph.graph.message import add_messages
+from typing import TypedDict, Annotated
+from agent_tools import image_recognition_tool, download_file_tool, reverse_text_tool, hub_stats_tool, web_search_tool
 
 # (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
-#
-
+# Setting up the llm
+llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
+tools = [web_search_tool, hub_stats_tool, download_file_tool,
+         image_recognition_tool, reverse_text_tool]
+chat_with_tools = llm.bind_tools(tools)
+
+# Defining my agent
+
+
+class MyAgent(TypedDict):
+    messages: Annotated[list[AnyMessage], add_messages]
+
+
 class BasicAgent:
     def __init__(self):
         print("BasicAgent initialized.")
+
     def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
-        fixed_answer = "This is a default answer."
-        print(f"Agent returning fixed answer: {fixed_answer}")
-        return fixed_answer
 
-def run_and_submit_all( profile: gr.OAuthProfile | None):
+        # Use the LangGraph agent to process the question
+        try:
+            result = my_agent.invoke(
+                {"messages": [HumanMessage(content=question)]})
+            # Get the last message from the result
+            last_message = result["messages"][-1]
+            answer = last_message.content
+            print(f"Agent returning answer: {answer}")
+            return answer
+        except Exception as e:
+            print(f"Error in agent processing: {e}")
+            return f"Error processing question: {e}"
+
+# set the main system prompt
+
+
+def assistant(state: MyAgent):
+    # Add system message to instruct the agent to use the tool
+    system_message = SystemMessage(content="""You are a general AI assistant. I will ask you a question.
+Report your thoughts, and finish your answer with just the answer — no prefixes like "FINAL ANSWER:".
+Your answer should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.
+If you're asked for a number, don't use commas or units like $ or %, unless specified.
+If you're asked for a string, don't use articles or abbreviations (e.g. for cities), and write digits in plain text unless told otherwise.
+
+Tool Use Guidelines:
+1. Do **not** use any tools outside of the provided tools list.
+2. Always use **only one tool at a time** in each step of your execution.
+3. For HuggingFace Hub information (models, authors, downloads), use the **get_hub_stats** tool.
+4. For web searches and current information, use **web_search_tool**.
+5. If the question looks reversed (starts with a period or reads backward), first use **reverse_text_tool** to reverse it, then process the question.
+6. When you need to download files from URLs, use **download_file_tool**.
+7. For image analysis and description, use **image_recognition_tool** (requires OpenAI API key).
+8. Even for complex tasks, assume a solution exists. If one method fails, try another approach using different tools.
+9. Keep responses concise and efficient.""")
+
+    # Combine system message with user messages
+    all_messages = [system_message] + state["messages"]
+
+    return {
+        "messages": [chat_with_tools.invoke(all_messages)],
+    }
+
+
+# define the agent graph
+builder = StateGraph(MyAgent)
+
+# Define nodes: these do the work
+builder.add_node("assistant", assistant)
+builder.add_node("tools", ToolNode(tools))
+
+# Define edges: these determine how the control flow moves
+builder.add_edge(START, "assistant")
+builder.add_conditional_edges(
+    "assistant",
+    tools_condition,
+)
+builder.add_edge("tools", "assistant")
+my_agent = builder.compile()
+
+# submit
+
+
+def run_and_submit_all(profile: gr.OAuthProfile | None):
     """
-    Fetches all questions, runs
+    Fetches all questions, runs MyAgent on them, submits all answers,
     and displays the results.
     """
     # --- Determine HF Space Runtime URL and Repo URL ---
-
+    # Get the SPACE_ID for sending link to the code
+    space_id = os.getenv("SPACE_ID")
 
     if profile:
-        username= f"{profile.username}"
+        username = f"{profile.username}"
         print(f"User logged in: {username}")
     else:
         print("User not logged in.")

@@ -55,16 +134,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
         response.raise_for_status()
         questions_data = response.json()
         if not questions_data:
-
-
+            print("Fetched questions list is empty.")
+            return "Fetched questions list is empty or invalid format.", None
         print(f"Fetched {len(questions_data)} questions.")
     except requests.exceptions.RequestException as e:
         print(f"Error fetching questions: {e}")
         return f"Error fetching questions: {e}", None
     except requests.exceptions.JSONDecodeError as e:
-
-
-
+        print(f"Error decoding JSON response from questions endpoint: {e}")
+        print(f"Response text: {response.text[:500]}")
+        return f"Error decoding server response for questions: {e}", None
     except Exception as e:
         print(f"An unexpected error occurred fetching questions: {e}")
         return f"An unexpected error occurred fetching questions: {e}", None

@@ -81,18 +160,22 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
             continue
         try:
            submitted_answer = agent(question_text)
-            answers_payload.append(
-
+            answers_payload.append(
+                {"task_id": task_id, "submitted_answer": submitted_answer})
+            results_log.append(
+                {"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
         except Exception as e:
-
-
+            print(f"Error running agent on task {task_id}: {e}")
+            results_log.append(
+                {"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
 
     if not answers_payload:
         print("Agent did not produce any answers to submit.")
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
-    # 4. Prepare Submission
-    submission_data = {"username": username.strip(
+    # 4. Prepare Submission
+    submission_data = {"username": username.strip(
+    ), "agent_code": agent_code, "answers": answers_payload}
     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
     print(status_update)

@@ -162,9 +245,11 @@ with gr.Blocks() as demo:
 
     run_button = gr.Button("Run Evaluation & Submit All Answers")
 
-    status_output = gr.Textbox(
+    status_output = gr.Textbox(
+        label="Run Status / Submission Result", lines=5, interactive=False)
     # Removed max_rows=10 from DataFrame constructor
-    results_table = gr.DataFrame(
+    results_table = gr.DataFrame(
+        label="Questions and Agent Answers", wrap=True)
 
     run_button.click(
         fn=run_and_submit_all,

@@ -175,22 +260,24 @@ if __name__ == "__main__":
     print("\n" + "-"*30 + " App Starting " + "-"*30)
     # Check for SPACE_HOST and SPACE_ID at startup for information
     space_host_startup = os.getenv("SPACE_HOST")
-    space_id_startup = os.getenv("SPACE_ID")
+    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup
 
     if space_host_startup:
         print(f"✅ SPACE_HOST found: {space_host_startup}")
-        print(
+        print(
+            f"   Runtime URL should be: https://{space_host_startup}.hf.space")
     else:
         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
 
-    if space_id_startup:
+    if space_id_startup:  # Print repo URLs if SPACE_ID is found
         print(f"✅ SPACE_ID found: {space_id_startup}")
         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-        print(
+        print(
+            f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
     else:
         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
 
     print("-"*(60 + len(" App Starting ")) + "\n")
 
     print("Launching Gradio Interface for Basic Agent Evaluation...")
-    demo.launch(debug=True, share=False)
+    demo.launch(debug=True, share=False)
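Before launching the full Gradio evaluation, the compiled graph can be exercised on a single question from a local interpreter. A minimal sketch, assuming the Space's dependencies are installed locally and OPENAI_API_KEY is set (importing app builds the graph and the Blocks UI but does not launch it, since demo.launch() sits behind the __main__ guard):

# Sketch: run the LangGraph agent defined in app.py on one question, outside Gradio.
from langchain_core.messages import HumanMessage

from app import my_agent  # builds llm, tools and the compiled graph on import

result = my_agent.invoke({"messages": [HumanMessage(content="What is 2 + 2?")]})
print(result["messages"][-1].content)  # final assistant reply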
tools.py  (CHANGED)
@@ -169,4 +169,4 @@ test_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/3/3a/Cat03.jpg/
 results = image_recognition_tool.invoke(download_file_tool.invoke(test_url))
 print(results)
 
-
+