'''
Fintelligence (C) 2024
Intelligent Finance
Released under the Apache 2.0 license
'''
import streamlit as st
import os
from openai import OpenAI
import json

# Initialize the client
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('HUGGINGFACE_API_TOKEN')
)

# Model configuration
MODEL = "HuggingFaceH4/zephyr-7b-beta"

# Define a custom function
def get_current_weather(location, unit="celsius"):
    """Get the current weather in a given location"""
    weather_info = {
        "location": location,
        "temperature": "22",
        "unit": unit,
        "forecast": ["sunny", "windy"],
    }
    return weather_info
# Set up the Streamlit app
st.title("Chatbot with Hugging Face and Zephyr-7B-Beta")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if prompt := st.chat_input("What is up?"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Prepare the messages for the API call
    messages = [
        {"role": msg["role"], "content": msg["content"]}
        for msg in st.session_state.messages
    ]
    # Call the Hugging Face API
    try:
        response = client.chat.completions.create(
            model=MODEL,
            messages=messages,
            functions=[
                {
                    "name": "get_current_weather",
                    "description": "Get the current weather in a given location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {"type": "string"},
                            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                        },
                        "required": ["location"],
                    },
                }
            ],
            function_call="auto",
        )
        # Process the response
        assistant_response = response.choices[0].message.content
        function_call = response.choices[0].message.function_call
        if function_call:
            function_name = function_call.name
            function_args = json.loads(function_call.arguments)
            if function_name == "get_current_weather":
                function_response = get_current_weather(**function_args)
                # Echo the assistant's function_call back into the history so the
                # "function" result message answers a preceding function_call,
                # as the OpenAI chat format expects
                messages.append({
                    "role": "assistant",
                    "content": assistant_response or "",
                    "function_call": {"name": function_name, "arguments": function_call.arguments},
                })
                # Call the API again with the function response
                messages.append({"role": "function", "name": function_name, "content": json.dumps(function_response)})
                final_response = client.chat.completions.create(
                    model=MODEL,
                    messages=messages
                )
                assistant_response = final_response.choices[0].message.content

        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            st.markdown(assistant_response)

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": assistant_response})
    except Exception as e:
        st.error(f"An error occurred: {str(e)}")

# Note: Token usage information is not available with the Hugging Face API
st.sidebar.write("Note: Token usage information is not available with the Hugging Face API.")
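
# To run this app locally (assuming the script is saved as app.py and the
# HUGGINGFACE_API_TOKEN environment variable holds a valid Hugging Face token):
#   pip install streamlit openai
#   streamlit run app.py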