Spaces: Build error

from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
import os
from huggingface_hub import InferenceClient
from Gradio_UI import GradioUI
from dotenv import load_dotenv
import random  # Added for tell_joke()

load_dotenv()
hf_token = os.getenv("HF_TOKEN")
alpha_vantage_api_key = os.getenv(
    "ALPHA_VANTAGE_API_KEY")  # Load Alpha Vantage API key


# Custom tool example
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:
    """A tool that does nothing yet.
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build?"


def get_weather_report_at_coordinates(coordinates, date_time):
    # Dummy function, returns [temperature in °C, risk of rain 0-1, wave height in m]
    return [28.0, 0.35, 0.85]


def convert_location_to_coordinates(location):
    # Returns dummy coordinates
    return [3.3, -42.0]


@tool
def get_weather_api(location: str, date_time: str) -> str:
    """
    Returns the weather report.
    Args:
        location: the name of the place that you want the weather for.
        date_time: the date and time for which you want the report, formatted as '%Y-%m-%d %H:%M:%S'.
    """
    lon, lat = convert_location_to_coordinates(location)
    date_time = datetime.datetime.strptime(date_time, "%Y-%m-%d %H:%M:%S")
    return str(get_weather_report_at_coordinates((lon, lat), date_time))
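
# Illustrative usage, not required by the Space itself (the helpers above only
# return dummy values): the date_time string must match "%Y-%m-%d %H:%M:%S",
# otherwise strptime raises a ValueError.
#   get_weather_api("Paris", "2025-03-01 14:00:00")
#   # -> "[28.0, 0.35, 0.85]"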

user_data = {}


def update_personality(name: str, personality: str) -> str:
    """Asks the user about their personality before predicting their future."""
    user_data[name] = personality
    return f"Great! Thanks {name}, I've updated your personality traits. Now ask me about your future."


# Pass the token explicitly so the Inference API call is authenticated.
client = InferenceClient(model="Qwen/Qwen2.5-Coder-32B-Instruct", token=hf_token)


@tool
def predict_future_with_model(name: str, personality: str) -> str:
    """
    Returns a fun and futuristic AI-generated prediction.
    Args:
        name: The user's name.
        personality: A description of the user's personality traits.
    """
    prompt = f"""
    Given the name '{name}' and personality traits '{personality}', generate a fun, futuristic prediction for their life.
    Your response should include:
    - A career path
    - A major life event
    - The number of kids they might have
    - A quirky or funny twist related to their personality
    Keep it engaging, futuristic, and a little humorous!
    """
    try:
        response = client.text_generation(prompt, max_new_tokens=100)
        return f"🔮 **Future Prediction for {name}:**\n{response}"
    except Exception as e:
        return f"Oops! I couldn't predict the future this time. Error: {str(e)}"


@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        tz = pytz.timezone(timezone)
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"


@tool
def get_financial_price(ticker: str) -> str:
    """
    Fetches the real-time price of a stock, cryptocurrency, or financial product using the Alpha Vantage API.
    Args:
        ticker: The ticker symbol (e.g., 'AAPL' for Apple stock, 'BTCUSD' for Bitcoin/USD).
    """
    if not alpha_vantage_api_key:
        return "Error: Alpha Vantage API key not found. Please set ALPHA_VANTAGE_API_KEY in your .env file."
    # Determine if it's a crypto or stock based on ticker format (simplified logic)
    is_crypto = len(ticker) > 5 and ticker.endswith(
        ("USD", "BTC", "ETH"))  # e.g., BTCUSD, ETHBTC
    if is_crypto:
        url = f"https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency={ticker[:3]}&to_currency={ticker[3:]}&apikey={alpha_vantage_api_key}"
    else:
        url = f"https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol={ticker}&apikey={alpha_vantage_api_key}"
    try:
        response = requests.get(url, timeout=10)
        data = response.json()
        if is_crypto:
            if "Realtime Currency Exchange Rate" in data:
                price = data["Realtime Currency Exchange Rate"]["5. Exchange Rate"]
                return f"The current price of {ticker[:3]} in {ticker[3:]} is {float(price):.2f} {ticker[3:]}."
            else:
                return f"Error: Could not fetch crypto price for {ticker}. Check the ticker symbol."
        else:
            if "Global Quote" in data and "05. price" in data["Global Quote"]:
                price = data["Global Quote"]["05. price"]
                return f"The current price of {ticker} is ${float(price):.2f} USD."
            else:
                return f"Error: Could not fetch stock price for {ticker}. Check the ticker symbol or API limits."
    except Exception as e:
        return f"Error fetching price for {ticker}: {str(e)}"


@tool
def tell_joke() -> str:
    """Returns a random stored joke."""
    jokes = [
        "Why do we tell actors to 'break a leg?' Because every play has a cast.",
        "I told my wife she should embrace her mistakes. She gave me a hug.",
        "I'm reading a book on the history of glue. I just can't seem to put it down.",
        "I would tell you a joke about an elevator, but it's an uplifting experience.",
        "I told my computer I needed a break and now it won't stop sending me vacation ads.",
        "I used to play piano by ear, but now I use my hands."
    ]
    return random.choice(jokes)


final_answer = FinalAnswerTool()

model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)

# Loaded but not registered with the agent below; add it to the tools list to enable it.
image_generation_tool = load_tool(
    "agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[
        my_custom_tool,
        get_weather_api,
        predict_future_with_model,
        get_current_time_in_timezone,
        get_financial_price,  # New tool added here
        tell_joke,
        final_answer
    ],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)
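
# Sanity check (assumption: run locally with HF_TOKEN set) - the agent can be
# exercised without the UI via agent.run("What time is it in Tokyo right now?").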

GradioUI(agent).launch()
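
# Note on the Spaces build error: the Space installs only what is listed in
# requirements.txt, so every third-party import above must appear there. A
# plausible (unpinned, assumed) list for this script:
#   smolagents, requests, pytz, pyyaml, python-dotenv, huggingface_hub, gradio
# The local files tools/final_answer.py, Gradio_UI.py, and prompts.yaml must
# also be present in the Space repository for the imports and open() to succeed.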