import os
import gradio as gr
import logging
from typing import Dict, List
from huggingface_hub import InferenceClient
from llama_index.core.tools import FunctionTool
from duckduckgo_search import DDGS
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.core.agent import ReActAgent
import numpy as np
import ast
import operator as op
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
HF_TOKEN = os.environ.get("HF_TOKEN")
# Small instruct model loaded locally through llama_index's HuggingFaceLLM wrapper.
llm = HuggingFaceLLM(
    context_window=4096,
    max_new_tokens=512,
    generate_kwargs={"temperature": 0.7, "top_p": 0.95},
    tokenizer_name="Gensyn/Qwen2.5-0.5B-Instruct",
    model_name="Gensyn/Qwen2.5-0.5B-Instruct",
)
class QuestionValidation:
    """Guesses the question behind an answer and scores question similarity."""

    def __init__(self, llm_client):
        self.client = llm_client
        self.embedding_model = SentenceTransformer("all-MiniLM-L6-v2")

    def guess_question(self, answer: str) -> str:
        # Ask the LLM to reconstruct the question that likely produced this answer.
        prompt = f"This was the answer: {answer}\nWhat question would likely have led to it?"
        return self.client.complete(prompt).text.strip()

    def compute_similarity(self, q1: str, q2: str) -> float:
        # Cosine similarity between the sentence embeddings of the two questions.
        embeddings = self.embedding_model.encode([q1, q2])
        return cosine_similarity([embeddings[0]], [embeddings[1]])[0][0]

    def validate_question_only(self, original_question: str, guessed_question: str) -> Dict[str, float]:
        similarity = self.compute_similarity(original_question, guessed_question)
        return {
            "original_question": original_question,
            "guessed_question": guessed_question,
            "similarity": round(float(similarity), 4),
        }
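# Illustrative usage (the score shown is an assumption, not a measured value):
#   QuestionValidation(llm).validate_question_only(
#       "What is the capital of France?", "Which city is the capital of France?")
#   -> {"original_question": ..., "guessed_question": ..., "similarity": ~0.9}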
def search_web(query: str, max_results: int = 5) -> List[Dict[str, str]]:
    """Search DuckDuckGo and return up to max_results text results."""
    try:
        with DDGS() as ddgs:
            return [r for r in ddgs.text(query, max_results=max_results)]
    except Exception as e:
        return [{"error": str(e)}]
# Whitelist of AST operator nodes the calculator is allowed to evaluate.
OPERATORS = {
    ast.Add: op.add,
    ast.Sub: op.sub,
    ast.Mult: op.mul,
    ast.Div: op.truediv,
    ast.Mod: op.mod,
    ast.Pow: op.pow,
    ast.USub: op.neg,
    ast.UAdd: op.pos,
    ast.FloorDiv: op.floordiv,
}
def evaluate_math_expression(expr: str) -> str:
    """Safely evaluate an arithmetic expression by walking its AST."""
    try:
        node = ast.parse(expr, mode="eval")

        def _eval(node):
            if isinstance(node, ast.Expression):
                return _eval(node.body)
            elif isinstance(node, ast.Constant):
                return node.value
            elif isinstance(node, ast.BinOp):
                return OPERATORS[type(node.op)](_eval(node.left), _eval(node.right))
            elif isinstance(node, ast.UnaryOp):
                return OPERATORS[type(node.op)](_eval(node.operand))
            else:
                raise ValueError(f"Unsupported expression: {ast.dump(node)}")

        return str(_eval(node))
    except Exception as e:
        return f"Error evaluating expression: {e}"
# Wrap the helpers as llama_index tools so the ReAct agent can call them.
validator = QuestionValidation(llm)

validate_tool = FunctionTool.from_defaults(
    fn=validator.validate_question_only,
    name="validate_question",
    description="Compares the similarity between two questions.",
)
search_tool = FunctionTool.from_defaults(
    fn=search_web,
    name="search_web",
    description="Searches the web using DuckDuckGo and returns results.",
)
math_tool = FunctionTool.from_defaults(
    fn=evaluate_math_expression,
    name="math_tool",
    description="Evaluates a basic Python math expression.",
)
TOOLS = [validate_tool, search_tool, math_tool]
agent = ReActAgent.from_tools(
    tools=TOOLS,
    llm=llm,
    verbose=True,
    max_iterations=3,
)
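# Illustrative interaction: a message like "What is 17 * 24?" should lead the agent
# to call math_tool with "17 * 24" and reply with "408"; the exact tool traces
# depend on the model's reasoning and are not guaranteed.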
def respond(message: str, history: List[List[str]]) -> str:
    # Gradio ChatInterface callback: forward the user message to the agent.
    response = agent.chat(message)
    return response.response
with gr.Blocks() as app:
    gr.ChatInterface(
        respond,
        chatbot=gr.Chatbot(),
        title="Agent",
        description="Ask me anything: math, web search, or guessing the question behind an answer.",
    )
if __name__ == "__main__":
    app.launch()
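# Local run (assuming this file is saved as app.py and the dependencies are installed):
#   python app.py
# Gradio serves the chat UI on http://127.0.0.1:7860 by default.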