from smolagents import (
    CodeAgent,
    DuckDuckGoSearchTool,
)
from ..llm import SwitchableOpenAIModel
from openinference.instrumentation.smolagents import SmolagentsInstrumentor
from langfuse import get_client
import os

# Get keys for your project from the project settings page: https://cloud.langfuse.com
os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."
os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"  # 🇪🇺 EU region

langfuse = get_client()

# Verify connection
if langfuse.auth_check():
    print("Langfuse client is authenticated and ready!")
else:
    print("Authentication failed. Please check your credentials and host.")

# Instrument smolagents so agent runs are traced and exported to Langfuse.
SmolagentsInstrumentor().instrument()

prompt = """
You are a general AI assistant. I will ask you a question. Your answer must follow these rules:
1. If the answer is a number, do not use commas or units such as $ or percent sign unless specified otherwise.
2. If the answer is a string, do not use articles or abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise.
3. If the answer is a comma-separated list, apply the above rules to each element.
Reply with only the answer and nothing else.
"""

# ModelScope OpenAI-compatible inference endpoint.
API_URL = "https://api-inference.modelscope.cn/v1"
API_KEY = "ms-..."

search_tool = DuckDuckGoSearchTool()

# Text-model candidates for the project's SwitchableOpenAIModel wrapper (from ..llm).
llm = SwitchableOpenAIModel(
    model_list=[
        'deepseek-ai/DeepSeek-V3.2',
        'PaddlePaddle/ERNIE-4.5-300B-A47B-PT',
        'deepseek-ai/DeepSeek-V3.1',
        'Qwen/Qwen3-Coder-480B-A35B-Instruct',
        'deepseek-ai/DeepSeek-R1-0528',
        'Qwen/Qwen3-235B-A22B-Thinking-2507',
        'Qwen/Qwen3-235B-A22B-Instruct-2507',
        'Qwen/Qwen3-235B-A22B',
        'MiniMax/MiniMax-M1-80k',
        'LLM-Research/Llama-4-Maverick-17B-128E-Instruct',
    ],
    api_base=API_URL,
    api_key=API_KEY,
)

# Vision-language model candidates for multimodal tasks.
vllm = SwitchableOpenAIModel(
    model_list=[
        'Qwen/Qwen3-VL-235B-A22B-Instruct',
        'Shanghai_AI_Laboratory/Intern-S1',
        'OpenGVLab/InternVL3_5-241B-A28B',
        'stepfun-ai/step3',
    ],
    api_base=API_URL,
    api_key=API_KEY,
)

base_agent = CodeAgent(
    tools=[search_tool],
    model=llm,
    # stream_outputs=True,
    additional_authorized_imports=[
        "math", "numpy", "pandas", "requests", "json", "re",
        "time", "datetime", "os", "openpyxl", "csv", "bs4",
    ],
    instructions=prompt,
    # add_base_tools=True,
)

vll_agent = CodeAgent(
    tools=[search_tool],
    model=vllm,
    # stream_outputs=True,
    additional_authorized_imports=[
        "math", "numpy", "pandas", "requests", "json", "re",
        "time", "datetime", "os", "openpyxl", "csv", "bs4",
    ],
    instructions=prompt,
    # add_base_tools=True,
)

if __name__ == '__main__':
    # print(base_agent.prompt_templates["system_prompt"])
    base_agent.run('What is the latest Pokémon version?')