import os
import re
from dotenv import load_dotenv
from collections.abc import Generator
from typing import Literal, TypedDict
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, StateGraph
from histopath.env_desc import library_content_dict
from histopath.llm import SourceType, get_llm
from histopath.model.retriever import ToolRetriever
from histopath.tool.support_tools import run_python_repl
from histopath.tool.tool_registry import ToolRegistry
from histopath.utils import (
pretty_print,
read_module2api,
run_bash_script,
run_with_timeout,
textify_api_dict,
)
if os.path.exists(".env"):
load_dotenv(".env", override=False)
print("Loaded environment variables from .env")
class AgentState(TypedDict):
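    """Shared LangGraph state: the running message history and the router's next step."""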
messages: list[BaseMessage]
next_step: str | None
class A1:
def __init__(
self,
path="./data",
llm="claude-sonnet-4-20250514",
source: SourceType | None = None,
use_tool_retriever=True,
timeout_seconds=600,
base_url: str | None = None,
api_key: str = "EMPTY",
):
"""Initialize the HistoPath agent.
Args:
path: Path to the data
llm: LLM to use for the agent
            source: Source provider: "OpenAI", "AzureOpenAI", "Anthropic", "Ollama", "Gemini", "Bedrock", "HuggingFace", or "Custom"
use_tool_retriever: If True, use a tool retriever
timeout_seconds: Timeout for code execution in seconds
base_url: Base URL for custom model serving (e.g., "http://localhost:8000/v1")
api_key: API key for the custom LLM
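
        Example (illustrative; assumes an Anthropic API key is configured in the environment):
            >>> agent = A1(path="./data", llm="claude-sonnet-4-20250514")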
"""
        if not os.path.exists(path):
            os.makedirs(path)
            print(f"Created directory: {path}")
        self.path = os.path.join(path, "histopath_data")
module2api = read_module2api()
self.llm = get_llm(
llm, stop_sequences=["</execute>", "</solution>"], source=source, base_url=base_url, api_key=api_key
)
self.module2api = module2api
self.use_tool_retriever = use_tool_retriever
if self.use_tool_retriever:
self.tool_registry = ToolRegistry(module2api)
self.retriever = ToolRetriever()
        # Code-execution timeout in seconds (600 s = 10 minutes by default)
        self.timeout_seconds = timeout_seconds
self.configure()
###########################
# Agent Prompting Section #
###########################
def _generate_system_prompt(
self,
tool_desc,
library_content_list,
self_critic=False,
is_retrieval=False,
):
"""Generate the system prompt based on the provided resources.
Args:
tool_desc: Dictionary of tool descriptions
library_content_list: List of libraries
self_critic: Whether to include self-critic instructions
is_retrieval: Whether this is for retrieval (True) or initial configuration (False)
Returns:
The generated system prompt
"""
def format_item_with_description(name, description):
"""Format an item with its description in a readable way."""
# Handle None or empty descriptions
if not description:
description = f"Library or Tooling Item: {name}"
# Check if the item is already formatted (contains a colon)
if isinstance(name, str) and ": " in name:
return name
# Wrap long descriptions to make them more readable
max_line_length = 80
if len(description) > max_line_length:
# Simple wrapping for long descriptions
wrapped_desc = []
words = description.split()
current_line = ""
for word in words:
if len(current_line) + len(word) + 1 <= max_line_length:
if current_line:
current_line += " " + word
else:
current_line = word
else:
wrapped_desc.append(current_line)
current_line = word
if current_line:
wrapped_desc.append(current_line)
# Join with newlines and proper indentation
formatted_desc = f"{name}:\n " + "\n ".join(wrapped_desc)
return formatted_desc
else:
return f"{name}: {description}"
# Format the default library content
if isinstance(library_content_list, list) and all(
isinstance(item, str) for item in library_content_list
):
if (
len(library_content_list) > 0
and isinstance(library_content_list[0], str)
and "," not in library_content_list[0]
):
# Simple list of strings
libraries_formatted = []
for lib in library_content_list:
description = self.library_content_dict.get(lib, f"Software library: {lib}")
libraries_formatted.append(format_item_with_description(lib, description))
else:
# Already formatted string
libraries_formatted = library_content_list
else:
# List with descriptions
libraries_formatted = []
for lib in library_content_list:
if isinstance(lib, dict):
name = lib.get("name", "")
description = self.library_content_dict.get(name, f"Software library: {name}")
libraries_formatted.append(format_item_with_description(name, description))
else:
description = self.library_content_dict.get(lib, f"Software library: {lib}")
libraries_formatted.append(format_item_with_description(lib, description))
# Base prompt
prompt_modifier = """
You are a helpful histopathology researcher assigned the task of problem-solving.
To achieve this, you will be using an interactive coding environment equipped with a variety of tool functions and software to assist you throughout the process.
Given a task, make a plan first. The plan should be a numbered list of steps that you will take to solve the task. Be specific and detailed.
Format your plan as a checklist with empty checkboxes like this:
1. [ ] First step
2. [ ] Second step
3. [ ] Third step
Follow the plan step by step. After completing each step, update the checklist by replacing the empty checkbox with a checkmark:
1. [✓] First step (completed)
2. [ ] Second step
3. [ ] Third step
If a step fails or needs modification, mark it with an X and explain why:
1. [✓] First step (completed)
2. [✗] Second step (failed because...)
3. [ ] Modified second step
4. [ ] Third step
Always show the updated plan after each step so the user can track progress.
At each turn, you should first provide your thinking and reasoning given the conversation history.
After that, you have two options:
1) Interact with a programming environment and receive the corresponding output within <observation></observation>. Your code should be enclosed using "<execute>" tag, for example: <execute> print("Hello World!") </execute>. IMPORTANT: You must end the code block with </execute> tag.
- For Python code (default): <execute> print("Hello World!") </execute>
- For Bash scripts and commands: <execute> #!BASH\necho "Hello from Bash"\nls -la </execute>
- For CLI softwares, use Bash scripts.
2) When you think it is ready, directly provide a solution that adheres to the required format for the given task to the user. Your solution should be enclosed using "<solution>" tag, for example: The answer is <solution> A </solution>. IMPORTANT: You must end the solution block with </solution> tag.
You have many chances to interact with the environment to receive the observation. So you can decompose your code into multiple steps.
Don't overcomplicate the code. Keep it simple and easy to understand.
When writing the code, please print out the steps and results in a clear and concise manner, like a research log.
When calling the existing Python functions in the function dictionary, YOU MUST SAVE THE OUTPUT and PRINT OUT the result.
For example: result = understand_scRNA(XXX); print(result)
Otherwise the system will not be able to know what has been done.
For Bash scripts and commands, use the #!BASH marker at the beginning of your code block. This allows for both simple commands and multi-line scripts with variables, loops, conditionals, and other Bash features.
In each response, you must include EITHER an <execute> or a <solution> tag, never both at the same time. Do not respond with messages without any tags, and do not send empty messages.
If a task is not related to histopathology or to any surrounding concepts within pathology, do not execute your plan at all.
If you have no knowledge of a provided library that seems highly useful for a given task (such as the LazySlide package), thoroughly explore the library's capabilities before experimenting with it.
"""
# Add self-critic instructions if needed
if self_critic:
prompt_modifier += """
You may or may not receive feedback from a human. If so, address the feedback by following the same procedure of multiple rounds of thinking and execution, then come up with a new solution.
"""
# Add environment resources
prompt_modifier += """
Environment Resources:
- Function Dictionary:
{function_intro}
---
{tool_desc}
---
{import_instruction}
- Software Library:
{library_intro}
Each library is listed with its description to help you understand its functionality.
----
{library_content_formatted}
----
- Note on using Bash scripts:
- Bash scripts and commands: Use the #!BASH marker in your execute block for both simple commands and complex shell scripts with variables, loops, conditionals, etc.
"""
# Set appropriate text based on whether this is initial configuration or after retrieval
if is_retrieval:
function_intro = "Based on your query, I've identified the following most relevant functions that you can use in your code:"
library_intro = (
"Based on your query, I've identified the following most relevant libraries that you can use:"
)
import_instruction = "IMPORTANT: When using any function, you MUST first import it from its module. For example:\nfrom [module_name] import [function_name]"
else:
function_intro = "In your code, you will need to import the function location using the following dictionary of functions:"
library_intro = "The environment supports a list of libraries that can be directly used. Do not forget the import statement:"
import_instruction = ""
# Format the content consistently for both initial and retrieval cases
library_content_formatted = "\n".join(libraries_formatted)
# Format the prompt with the appropriate values
format_dict = {
"function_intro": function_intro,
"tool_desc": textify_api_dict(tool_desc) if isinstance(tool_desc, dict) else tool_desc,
"import_instruction": import_instruction,
"library_intro": library_intro,
"library_content_formatted": library_content_formatted,
}
formatted_prompt = prompt_modifier.format(**format_dict)
return formatted_prompt
def configure(self, self_critic=False, test_time_scale_round=0):
"""Configure the agent with the initial system prompt and workflow.
Args:
self_critic: Whether to enable self-critic mode
test_time_scale_round: Number of rounds for test time scaling
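
        Example (illustrative; `configure` is also called automatically from `__init__`):
            >>> agent.configure(self_critic=True, test_time_scale_round=1)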
"""
# Store self_critic for later use
self.self_critic = self_critic
# Store library_content_dict directly without library_content
self.library_content_dict = library_content_dict
# Prepare tool descriptions
tool_desc = {i: [x for x in j if x["name"] != "run_python_repl"] for i, j in self.module2api.items()}
# Prepare library content list
library_content_list = list(self.library_content_dict.keys())
self.system_prompt = self._generate_system_prompt(
tool_desc=tool_desc,
library_content_list=library_content_list,
self_critic=self_critic,
is_retrieval=False
)
# Define the nodes
def generate(state: AgentState) -> AgentState:
messages = [SystemMessage(content=self.system_prompt)] + state["messages"]
response = self.llm.invoke(messages)
# Parse the response
msg = str(response.content)
# Check for incomplete tags and fix them
if "<execute>" in msg and "</execute>" not in msg:
msg += "</execute>"
if "<solution>" in msg and "</solution>" not in msg:
msg += "</solution>"
if "<think>" in msg and "</think>" not in msg:
msg += "</think>"
think_match = re.search(r"<think>(.*?)</think>", msg, re.DOTALL)
execute_match = re.search(r"<execute>(.*?)</execute>", msg, re.DOTALL)
answer_match = re.search(r"<solution>(.*?)</solution>", msg, re.DOTALL)
# Add the message to the state before checking for errors
state["messages"].append(AIMessage(content=msg.strip()))
if answer_match:
state["next_step"] = "end"
elif execute_match:
state["next_step"] = "execute"
elif think_match:
state["next_step"] = "generate"
else:
print("parsing error...")
# Check if we already added an error message to avoid infinite loops
                error_count = sum(
                    1
                    for m in state["messages"]
                    if isinstance(m, HumanMessage) and "there are no tags" in str(m.content)
                )
if error_count >= 2:
# If we've already tried to correct the model twice, just end the conversation
print("Detected repeated parsing errors, ending conversation")
state["next_step"] = "end"
# Add a final message explaining the termination
state["messages"].append(
AIMessage(
content="Execution terminated due to repeated parsing errors. Please check your input and try again."
)
)
else:
# Try to correct it
state["messages"].append(
HumanMessage(
content="Each response must include thinking process followed by either <execute> or <solution> tag. But there are no tags in the current response. Please follow the instruction, fix and regenerate the response again."
)
)
state["next_step"] = "generate"
return state
def execute(state: AgentState) -> AgentState:
last_message = state["messages"][-1].content
# Only add the closing tag if it's not already there
if "<execute>" in last_message and "</execute>" not in last_message:
last_message += "</execute>"
execute_match = re.search(r"<execute>(.*?)</execute>", last_message, re.DOTALL)
if execute_match:
code = execute_match.group(1)
                # Use the configured code-execution timeout
                timeout = self.timeout_seconds
# Check if the code is a Bash script or CLI command
if (
code.strip().startswith("#!BASH")
or code.strip().startswith("# Bash script")
or code.strip().startswith("#!CLI")
):
# Handle both Bash scripts and CLI commands with the same function
if code.strip().startswith("#!CLI"):
# For CLI commands, extract the command and run it as a simple bash script
                        cli_command = re.sub(r"^#!CLI", "", code, count=1).strip()
# Remove any newlines to ensure it's a single command
cli_command = cli_command.replace("\n", " ")
result = run_with_timeout(run_bash_script, [cli_command], timeout=timeout)
else:
# For Bash scripts, remove the marker and run as a bash script
                        bash_script = re.sub(r"^#!BASH|^# Bash script", "", code, count=1).strip()
result = run_with_timeout(run_bash_script, [bash_script], timeout=timeout)
# Otherwise, run as Python code
else:
result = run_with_timeout(run_python_repl, [code], timeout=timeout)
if len(result) > 10000:
result = (
"The output is too long to be added to context. Here are the first 10K characters...\n"
+ result[:10000]
)
observation = f"\n<observation>{result}</observation>"
state["messages"].append(AIMessage(content=observation.strip()))
return state
def routing_function(
state: AgentState,
) -> Literal["execute", "generate", "end"]:
next_step = state.get("next_step")
if next_step == "execute":
return "execute"
elif next_step == "generate":
return "generate"
elif next_step == "end":
return "end"
else:
raise ValueError(f"Unexpected next_step: {next_step}")
def routing_function_self_critic(
state: AgentState,
) -> Literal["generate", "end"]:
next_step = state.get("next_step")
if next_step == "generate":
return "generate"
elif next_step == "end":
return "end"
else:
raise ValueError(f"Unexpected next_step: {next_step}")
def execute_self_critic(state: AgentState) -> AgentState:
if self.critic_count < test_time_scale_round:
# Generate feedback based on message history
messages = state["messages"]
feedback_prompt = f"""
Here is a reminder of what is the user requested: {self.user_task}
Examine the previous executions, reaosning, and solutions.
Critic harshly on what could be improved?
Be specific and constructive.
Think hard what are missing to solve the task.
No question asked, just feedbacks.
"""
feedback = self.llm.invoke(messages + [HumanMessage(content=feedback_prompt)])
# Add feedback as a new message
state["messages"].append(
HumanMessage(
content=f"Wait... this is not enough to solve the task. Here are some feedbacks for improvement:\n{feedback.content}"
)
)
self.critic_count += 1
state["next_step"] = "generate"
else:
state["next_step"] = "end"
return state
# Create the workflow
workflow = StateGraph(AgentState)
# Add nodes
workflow.add_node("generate", generate)
workflow.add_node("execute", execute)
if self_critic:
workflow.add_node("self_critic", execute_self_critic)
# Add conditional edges
workflow.add_conditional_edges(
"generate",
routing_function,
path_map={
"execute": "execute",
"generate": "generate",
"end": "self_critic",
},
)
workflow.add_conditional_edges(
"self_critic",
routing_function_self_critic,
path_map={"generate": "generate", "end": END},
)
else:
# Add conditional edges
workflow.add_conditional_edges(
"generate",
routing_function,
path_map={"execute": "execute", "generate": "generate", "end": END},
)
workflow.add_edge("execute", "generate")
workflow.add_edge(START, "generate")
        # Compile the workflow with an in-memory checkpointer for conversation state
        self.checkpointer = MemorySaver()
        self.app = workflow.compile(checkpointer=self.checkpointer)
# display(Image(self.app.get_graph().draw_mermaid_png()))
def _prepare_resources_for_retrieval(self, prompt):
"""Prepare resources for retrieval and return selected resource names.
Args:
prompt: The user's query
Returns:
dict: Dictionary containing selected resource names for tools, data_lake, and libraries
"""
if not self.use_tool_retriever:
return None
# Gather all available resources
# 1. Tools from the registry
all_tools = self.tool_registry.tools if hasattr(self, "tool_registry") else []
# 2. Libraries with descriptions - use library_content_dict directly
library_descriptions = []
for lib_name, lib_desc in self.library_content_dict.items():
library_descriptions.append({"name": lib_name, "description": lib_desc})
# Use retrieval to get relevant resources
resources = {
"tools": all_tools,
"libraries": library_descriptions,
}
# Use prompt-based retrieval with the agent's LLM
selected_resources = self.retriever.prompt_based_retrieval(prompt, resources, llm=self.llm)
print("Using prompt-based retrieval with the agent's LLM")
# Extract the names from the selected resources for the system prompt
selected_resources_names = {
"tools": selected_resources["tools"],
"libraries": [lib["name"] if isinstance(lib, dict) else lib for lib in selected_resources["libraries"]],
}
return selected_resources_names
def go(self, prompt):
"""Execute the agent with the given prompt.
Args:
prompt: The user's query
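
        Example (illustrative; assumes provider credentials are configured):
            >>> agent = A1(path="./data")
            >>> log, answer = agent.go("Which stain highlights nuclei in an H&E slide?")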
"""
self.critic_count = 0
self.user_task = prompt
if self.use_tool_retriever:
selected_resources_names = self._prepare_resources_for_retrieval(prompt)
self.update_system_prompt_with_selected_resources(selected_resources_names)
inputs = {"messages": [HumanMessage(content=prompt)], "next_step": None}
config = {"recursion_limit": 500, "configurable": {"thread_id": 42}}
self.log = []
for s in self.app.stream(inputs, stream_mode="values", config=config):
message = s["messages"][-1]
out = pretty_print(message)
self.log.append(out)
return self.log, message.content
def go_stream(self, prompt, image_path=None) -> Generator[dict, None, None]:
"""Execute the agent with the given prompt and return a generator that yields each step.
This function returns a generator that yields each step of the agent's execution,
allowing for real-time monitoring of the agent's progress.
Args:
            prompt: The user's query
            image_path: Optional path to a user-uploaded file that the task should reference
Yields:
dict: Each step of the agent's execution containing the current message and state
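
        Example (illustrative):
            >>> agent = A1(path="./data")
            >>> for step in agent.go_stream("List the available tools for slide preprocessing."):
            ...     print(step["output"])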
"""
self.critic_count = 0
self.user_task = prompt
        if image_path:
            self.user_task += f"""
            \nUser uploaded this file:\n
            {image_path}
            Please use it if needed.
            """
if self.use_tool_retriever:
selected_resources_names = self._prepare_resources_for_retrieval(prompt)
self.update_system_prompt_with_selected_resources(selected_resources_names)
inputs = {"messages": [HumanMessage(content=prompt)], "next_step": None}
config = {"recursion_limit": 500, "configurable": {"thread_id": 42}}
self.log = []
for s in self.app.stream(inputs, stream_mode="values", config=config):
message = s["messages"][-1]
out = pretty_print(message)
self.log.append(out)
# Yield the current step
yield {"output": out}
def update_system_prompt_with_selected_resources(self, selected_resources):
"""Update the system prompt with the selected resources."""
# Extract tool descriptions for the selected tools
tool_desc = {}
for tool in selected_resources["tools"]:
# Get the module name from the tool
if isinstance(tool, dict):
module_name = tool.get("module", None)
# If module is not specified, try to find it in the module2api
if not module_name and hasattr(self, "module2api"):
for mod, apis in self.module2api.items():
for api in apis:
if api.get("name") == tool.get("name"):
module_name = mod
# Update the tool with the module information
tool["module"] = module_name
break
if module_name:
break
# If still not found, use a default
if not module_name:
module_name = "histopath.tool.scRNA_tools" # Default to scRNA_tools as a fallback
tool["module"] = module_name
else:
module_name = getattr(tool, "module_name", None)
# If module is not specified, try to find it in the module2api
if not module_name and hasattr(self, "module2api"):
tool_name = getattr(tool, "name", str(tool))
for mod, apis in self.module2api.items():
for api in apis:
if api.get("name") == tool_name:
module_name = mod
# Set the module_name attribute
tool.module_name = module_name
break
if module_name:
break
# If still not found, use a default
if not module_name:
module_name = "histopath.tool.scRNA_tools" # Default to scRNA_tools as a fallback
tool.module_name = module_name
if module_name not in tool_desc:
tool_desc[module_name] = []
# Add the tool to the appropriate module
if isinstance(tool, dict):
# Ensure the module is included in the tool description
if "module" not in tool:
tool["module"] = module_name
tool_desc[module_name].append(tool)
else:
# Convert tool object to dictionary
tool_dict = {
"name": getattr(tool, "name", str(tool)),
"description": getattr(tool, "description", ""),
"parameters": getattr(tool, "parameters", {}),
"module": module_name, # Explicitly include the module
}
tool_desc[module_name].append(tool_dict)
self.system_prompt = self._generate_system_prompt(
tool_desc=tool_desc,
library_content_list=selected_resources["libraries"],
self_critic=getattr(self, "self_critic", False),
is_retrieval=True,
)
# Print the raw system prompt for debugging
# print("\n" + "="*20 + " RAW SYSTEM PROMPT FROM AGENT " + "="*20)
# print(self.system_prompt)
# print("="*70 + "\n")
def result_formatting(self, output_class, task_intention):
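        """Extract and parse the final task output from the agent's message log.

        Args:
            output_class: Structured-output schema (e.g., a Pydantic model) describing the expected result
            task_intention: Description of the task output requirement

        Returns:
            dict: The parsed output fields
        """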
self.format_check_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
(
"You are evaluateGPT, tasked with extract and parse the task output based on the history of an agent. "
"Review the entire history of messages provided. "
"Here is the task output requirement: \n"
f"'{task_intention.replace('{', '{{').replace('}', '}}')}'.\n"
),
),
("placeholder", "{messages}"),
]
)
checker_llm = self.format_check_prompt | self.llm.with_structured_output(output_class)
result = checker_llm.invoke({"messages": [("user", str(self.log))]}).dict()
return result
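

if __name__ == "__main__":
    # Minimal usage sketch (illustrative; assumes provider credentials such as
    # ANTHROPIC_API_KEY are set in the environment or a local .env file).
    agent = A1(path="./data", use_tool_retriever=True)
    # Stream each step of the reasoning/execution loop as it happens.
    for step in agent.go_stream("List the software libraries available in this environment."):
        print(step["output"])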