import os
import re
import gradio as gr
from typing import List, Tuple, Optional, Dict, Any
from datetime import datetime
from dotenv import load_dotenv
from src.api.steam_api import SteamAPI
from src.utils.game_suggestions import what_to_play_today
from src.api.steam_store_api import SteamStoreAPI
# Load environment variables from .env file
load_dotenv()
# Try to import Ollama, but make it optional
# If OLLAMA_HOST is set in .env, it will be used automatically
ollama_client = None
try:
import ollama
OLLAMA_AVAILABLE = True
# Check if custom host is specified in environment
custom_host = os.getenv("OLLAMA_HOST")
if custom_host:
try:
ollama_client = ollama.Client(host=custom_host)
print(f"Ollama client initialized with host: {custom_host}")
except Exception as e:
print(f"Warning: Could not create Ollama client with {custom_host}: {e}")
print("Falling back to default localhost connection")
ollama_client = None
except ImportError:
OLLAMA_AVAILABLE = False
print("Warning: Ollama not available. Install with: pip install ollama")
# Initialize Steam Store API client (public API, no key required)
try:
steam_store_api = SteamStoreAPI()
except Exception as e:
print(f"Warning: Could not initialize Steam Store API: {e}")
steam_store_api = None
# Ollama model name
# Note: phi3:mini is a very small model (3.8B params) and may struggle with complex instructions.
# For better instruction following, consider using:
# - llama3 (8B or 70B) - excellent instruction following
# - mistral (7B) - good balance of quality and speed
# - qwen2.5 (7B or 14B) - excellent instruction following
# - phi3 (14B) - better than mini but still smaller
# You can set OLLAMA_MODEL in your .env file to override this default
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "qwen2.5:7b")
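# Example .env entries (illustrative values, not defaults shipped with this app):
#   OLLAMA_MODEL=llama3:8b
#   OLLAMA_HOST=http://192.168.1.50:11434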
# Hugging Face Inference API fallback
HF_INFERENCE_API_AVAILABLE = False
hf_client = None
HF_MODEL = None
try:
from huggingface_hub import InferenceClient
HF_INFERENCE_API_AVAILABLE = True
# Use a model that works well on Hugging Face Spaces
# On Hugging Face Spaces, token is available via HF_TOKEN or HUGGINGFACE_HUB_TOKEN
HF_MODEL = os.getenv("HF_MODEL", "mistralai/Mistral-7B-Instruct-v0.2")
# Try multiple environment variable names for the token
# On Hugging Face Spaces, the token is usually available
HF_TOKEN = (
os.getenv("HF_TOKEN") or
os.getenv("HUGGINGFACE_HUB_TOKEN") or
os.getenv("HUGGING_FACE_HUB_TOKEN") or
os.getenv("HF_API_TOKEN")
)
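    # Example (illustrative): on a Hugging Face Space the token is typically set
    # under Settings -> Variables and secrets as HF_TOKEN; locally, a .env line
    # such as HF_TOKEN=hf_xxxxx (placeholder) is picked up by load_dotenv() above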
# Initialize client - always try with token first if available
# On Hugging Face Spaces, we should have a token
if HF_TOKEN:
try:
hf_client = InferenceClient(model=HF_MODEL, token=HF_TOKEN)
print(f"Hugging Face Inference API initialized with model: {HF_MODEL} (using token)")
except Exception as e:
print(f"Warning: Could not initialize with token, trying without: {e}")
# Try without explicit token (library might auto-detect)
try:
hf_client = InferenceClient(model=HF_MODEL)
print(f"Hugging Face Inference API initialized with model: {HF_MODEL} (auto-detected)")
except Exception as e2:
print(f"Warning: Could not initialize Hugging Face Inference API: {e2}")
hf_client = None
else:
# No token found, try without (for public models)
try:
hf_client = InferenceClient(model=HF_MODEL)
print(f"Hugging Face Inference API initialized with model: {HF_MODEL} (no token, public model)")
except Exception as e:
print(f"Warning: Could not initialize without token: {e}")
# Try with a smaller, publicly accessible model as fallback
try:
HF_MODEL = "gpt2" # Very basic fallback
hf_client = InferenceClient(model=HF_MODEL)
print(f"Hugging Face Inference API initialized with fallback model: {HF_MODEL}")
except Exception as e2:
print(f"Warning: Could not initialize Hugging Face Inference API with fallback: {e2}")
hf_client = None
except ImportError:
print("Warning: huggingface_hub not available. Install with: pip install huggingface_hub")
hf_client = None
except Exception as e:
print(f"Warning: Could not initialize Hugging Face Inference API: {e}")
hf_client = None
def chat_with_llm(messages: List[Dict[str, str]], max_tokens: int = 200, temperature: float = 0.7) -> str:
"""
Chat with LLM using Ollama if available, otherwise fallback to Hugging Face Inference API.
Args:
messages: List of message dicts with 'role' and 'content' keys
max_tokens: Maximum tokens to generate
temperature: Temperature for generation
Returns:
Response text from the LLM
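    Example (illustrative):
        reply = chat_with_llm([
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Recommend a co-op game."},
        ])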
"""
# Try Ollama first
if OLLAMA_AVAILABLE:
try:
if ollama_client:
response = ollama_client.chat(
model=OLLAMA_MODEL,
messages=messages,
options={
"temperature": temperature,
"num_predict": max_tokens
}
)
else:
response = ollama.chat(
model=OLLAMA_MODEL,
messages=messages,
options={
"temperature": temperature,
"num_predict": max_tokens
}
)
return response["message"]["content"]
except Exception as e:
print(f"Warning: Ollama call failed: {e}, trying Hugging Face Inference API")
# Fallback to Hugging Face Inference API
if HF_INFERENCE_API_AVAILABLE and hf_client:
try:
            # Convert messages to the format expected by chat completion,
            # keeping only recognized roles (system, user, assistant)
            chat_messages = [
                {"role": m.get("role", "user"), "content": m.get("content", "")}
                for m in messages
                if m.get("role", "user") in ("system", "user", "assistant")
            ]
# Always try chat completion first (required for conversational models like Mistral)
# Mistral models support "conversational" task, not "text-generation"
# Check if model only supports conversational (like Mistral, Qwen, Llama)
model_supports_only_conversational = (
HF_MODEL and (
"mistral" in HF_MODEL.lower() or
"llama" in HF_MODEL.lower() or
"qwen" in HF_MODEL.lower() or
"chat" in HF_MODEL.lower() or
"instruct" in HF_MODEL.lower()
)
)
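            # Example (illustrative): the default model name
            # "mistralai/Mistral-7B-Instruct-v0.2" contains both "mistral" and
            # "instruct", so it is routed to chat completion only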
try:
# Limit conversation history to prevent context length issues
# Keep system message + last 10 message pairs (20 messages max)
                if len(chat_messages) > 21:  # 1 system + 20 conversation messages
                    original_count = len(chat_messages)
                    system_msg = chat_messages[0] if chat_messages[0].get("role") == "system" else None
                    chat_messages = ([system_msg] + chat_messages[-20:]) if system_msg else chat_messages[-20:]
                    print(f"DEBUG: Truncated conversation history from {original_count} to {len(chat_messages)} messages")
print(f"DEBUG: Sending {len(chat_messages)} messages to Hugging Face API")
response = hf_client.chat_completion(
messages=chat_messages,
max_tokens=max_tokens,
temperature=temperature
)
# Extract response text from various possible response formats
if isinstance(response, dict):
# OpenAI-style format
if "choices" in response and len(response["choices"]) > 0:
message = response["choices"][0].get("message", {})
if isinstance(message, dict):
return message.get("content", "").strip()
return str(message).strip()
# Direct message format
elif "message" in response:
msg_content = response["message"]
if isinstance(msg_content, dict):
return msg_content.get("content", "").strip()
return str(msg_content).strip()
# Generated text format
elif "generated_text" in response:
return response["generated_text"].strip()
# If response is a string, return it
elif isinstance(response, str):
return response.strip()
# Otherwise convert to string
return str(response).strip()
except Exception as chat_error:
# Chat completion failed - log the full error for debugging
error_str = str(chat_error).lower()
error_full = str(chat_error)
print(f"ERROR: Hugging Face chat_completion failed: {error_full}")
print(f"ERROR: Error type: {type(chat_error)}")
print(f"ERROR: Number of messages sent: {len(chat_messages)}")
# Check for specific error types
if "rate limit" in error_str or "429" in error_str or "quota" in error_str:
raise Exception(f"Rate limit exceeded. Please wait a moment and try again. Error: {error_full}")
elif "timeout" in error_str or "timed out" in error_str:
raise Exception(f"Request timed out. The conversation might be too long. Please try a shorter message. Error: {error_full}")
elif "context length" in error_str or "token" in error_str and "limit" in error_str:
raise Exception(f"Conversation too long. Please start a new conversation. Error: {error_full}")
# Never try text_generation for conversational-only models (Mistral, etc.)
if model_supports_only_conversational:
# Model only supports conversational/chat completion, don't try text generation
print(f"Chat completion failed for conversational-only model ({HF_MODEL}): {chat_error}")
raise Exception(f"Hugging Face API error with {HF_MODEL}: {error_full}")
# Also check error message for indicators that model only supports conversational
if "conversational" in error_str or ("text-generation" in error_str and "not supported" in error_str):
# Error indicates model only supports conversational, don't try text generation
print(f"Chat completion failed - model only supports conversational: {chat_error}")
raise chat_error
# Otherwise, try text generation as fallback (for models that support it)
print(f"Chat completion failed: {chat_error}, trying text generation as fallback")
# Format messages for instruction-tuned models
system_msg = ""
conversation = []
for msg in messages:
role = msg.get("role", "user")
content = msg.get("content", "")
if role == "system":
system_msg = content
elif role in ["user", "assistant"]:
conversation.append({"role": role, "content": content})
                # Build prompt in Mistral-style instruction format:
                # <s>[INST] {system}\n\n{user1} [/INST] {assistant1}</s>[INST] {user2} [/INST]
                if system_msg:
                    prompt = f"<s>[INST] {system_msg}\n\n"
                else:
                    prompt = "<s>[INST] "
                # Add conversation history, opening a new [INST] block for each
                # follow-up user turn and closing each completed turn
                for i, msg in enumerate(conversation):
                    if msg["role"] == "user":
                        if i > 0:
                            prompt += "[INST] "
                        prompt += msg["content"] + " [/INST]"
                    elif msg["role"] == "assistant":
                        prompt += " " + msg["content"] + "</s>"
                # Ensure the prompt ends with a closed instruction block
                if not prompt.rstrip().endswith("[/INST]"):
                    prompt += " [/INST]"
# Call Hugging Face Inference API with text generation
response = hf_client.text_generation(
prompt,
max_new_tokens=max_tokens,
temperature=temperature,
return_full_text=False,
stop_sequences=["</s>", "[INST]"]
)
return response.strip()
except Exception as e:
print(f"Warning: Hugging Face Inference API call failed: {e}")
# Provide a more helpful error message - this is the actual error we want to show
error_details = str(e)
if "api_key" in error_details.lower() or "token" in error_details.lower() or "authentication" in error_details.lower():
raise Exception(f"Hugging Face Inference API authentication failed. Please check your HF_TOKEN secret in Space settings. Original error: {e}")
elif "conversational" in error_details.lower() or "text-generation" in error_details.lower() or "not supported" in error_details.lower():
raise Exception(f"Hugging Face model ({HF_MODEL}) compatibility error: {e}. The model may require a different API format or may not be accessible.")
elif "rate limit" in error_details.lower() or "quota" in error_details.lower():
raise Exception(f"Hugging Face Inference API rate limit exceeded: {e}. Please try again later or check your token permissions.")
else:
# Show the actual Hugging Face error, not the Ollama one
raise Exception(f"Hugging Face Inference API error: {e}")
# If neither is available
raise Exception("Neither Ollama nor Hugging Face Inference API is available. Please install one of them.")
def format_time_ago(timestamp: int) -> str:
"""
Format a Unix timestamp as a human-readable "time ago" string.
Args:
timestamp: Unix timestamp in seconds
Returns:
Human-readable time ago string (e.g., "2 hours ago", "3 days ago")
"""
if not timestamp:
return "unknown time ago"
now = datetime.now()
last_played = datetime.fromtimestamp(timestamp)
time_diff = now - last_played
if time_diff.days > 0:
if time_diff.days == 1:
return "1 day ago"
elif time_diff.days < 7:
return f"{time_diff.days} days ago"
elif time_diff.days < 30:
weeks = time_diff.days // 7
return f"{weeks} week{'s' if weeks > 1 else ''} ago"
else:
months = time_diff.days // 30
return f"{months} month{'s' if months > 1 else ''} ago"
elif time_diff.seconds >= 3600:
hours = time_diff.seconds // 3600
return f"{hours} hour{'s' if hours > 1 else ''} ago"
elif time_diff.seconds >= 60:
minutes = time_diff.seconds // 60
return f"{minutes} minute{'s' if minutes > 1 else ''} ago"
else:
return "just now"
def format_playtime(minutes: int) -> str:
"""
Format playtime in minutes as a human-readable string.
Args:
minutes: Playtime in minutes
Returns:
Formatted string (e.g., "2.5 hours", "45 minutes")
"""
if not minutes:
return "0 minutes"
    if minutes < 60:
        return f"{minutes} minute{'s' if minutes != 1 else ''}"
    hours = minutes / 60
    if hours == int(hours):
        return f"{int(hours)} hour{'s' if hours != 1 else ''}"
    return f"{hours:.1f} hours"
def generate_greeting_with_ollama(steamid: str, steam_api_key: Optional[str] = None) -> str:
    """
    Generate a personalized greeting using the LLM based on the user's Steam data.
    Args:
        steamid: The Steam ID of the user
        steam_api_key: Steam Web API key used to fetch the user's game data
    Returns:
        Greeting message generated by the LLM (with a plain-text fallback)
    """
# Gather Steam data
game_info = {}
try:
# Initialize Steam API with provided key
if not steam_api_key:
return "Hello! 👋\n\nPlease provide your Steam Web API key to get started. You can get one at https://steamcommunity.com/dev/apikey"
try:
steam_api = SteamAPI(api_key=steam_api_key)
except ValueError as e:
return f"Hello! 👋\n\nI couldn't connect to the Steam API: {str(e)}\n\nPlease check your Steam API key."
# Get recently played games
recent_result = steam_api.get_recently_played_games(steamid=steamid)
recent_response = recent_result.get("response", {})
recent_games = recent_response.get("games", [])
if not recent_games:
game_info["has_recent_games"] = False
else:
# Find the most recently played game (highest rtime_last_played)
last_game = max(recent_games, key=lambda g: g.get("rtime_last_played", 0))
game_info["has_recent_games"] = True
game_info["game_name"] = last_game.get("name", "Unknown Game")
game_info["rtime_last_played"] = last_game.get("rtime_last_played", 0)
game_info["playtime_2weeks"] = last_game.get("playtime_2weeks", 0)
# Get total playtime from owned games
playtime_forever = game_info["playtime_2weeks"] # Default to 2 weeks playtime
try:
owned_result = steam_api.get_owned_games(
steamid=steamid,
include_appinfo=False,
include_played_free_games=True
)
owned_response = owned_result.get("response", {})
owned_games = owned_response.get("games", [])
# Find the game in owned games to get total playtime
for game in owned_games:
if game.get("appid") == last_game.get("appid"):
playtime_forever = game.get("playtime_forever", game_info["playtime_2weeks"])
break
except Exception:
pass
game_info["playtime_forever"] = playtime_forever
game_info["time_ago"] = format_time_ago(game_info["rtime_last_played"])
game_info["total_playtime"] = format_playtime(playtime_forever)
except Exception as e:
return f"Hello! 👋\n\nI encountered an error while fetching your game data: {str(e)}\n\nPlease make sure your Steam ID is correct and your profile is set to public."
# Build prompt for Ollama
if game_info.get("has_recent_games"):
prompt = f"""You are a friendly Steam gaming assistant. Greet the user and tell them about their last played game.
User's last played game information:
- Game name: {game_info['game_name']}
- Last played: {game_info['time_ago']}
- Total playtime: {game_info['total_playtime']}
Write a warm, friendly greeting (2-3 sentences) that:
1. Greets them enthusiastically
2. Mentions the last game they played, when they played it, and how much time they've spent on it
3. Asks what you can do for them today
Be conversational, friendly, and enthusiastic. Use emojis sparingly. Don't be too formal."""
else:
prompt = """You are a friendly Steam gaming assistant. Greet the user.
The user doesn't have any recently played games visible (their profile might be private or they haven't played recently).
Write a warm, friendly greeting (2-3 sentences) that:
1. Greets them enthusiastically
2. Acknowledges that you couldn't see their recent games
3. Asks what you can do for them today
Be conversational, friendly, and enthusiastic. Use emojis sparingly."""
# Generate greeting with LLM (Ollama or Hugging Face)
try:
greeting = chat_with_llm(
messages=[
{
"role": "system",
"content": "You are a friendly, enthusiastic Steam gaming assistant. You help users discover games and manage their Steam library."
},
{
"role": "user",
"content": prompt
}
],
max_tokens=200,
temperature=0.8
)
return greeting
except Exception as e:
# Fallback to basic greeting if Ollama fails
if game_info.get("has_recent_games"):
return f"Hello! 👋\n\nI see the last game you played was **{game_info['game_name']}**. You played it {game_info['time_ago']}, and you've spent a total of **{game_info['total_playtime']}** on it.\n\nWhat can I do for you today? I can help you find games to play, get recommendations, and much more! 🎮"
else:
return "Hello! 👋\n\nI couldn't find any recently played games in your Steam library. This might be because your profile is private or you haven't played any games recently.\n\nWhat can I do for you today? I can help you find games to play, get recommendations, and much more! 🎮"
    # Note: chat_with_llm raises if neither Ollama nor the Hugging Face
    # Inference API is available, so the except branch above also covers
    # that case with the same plain-text fallback.
def on_steamid_change(steamid: str, steam_api_key: str) -> Tuple[List[List[str]], str, str, str]:
"""
Called when Steam ID is provided. Opens a chatbot window with a personalized greeting.
Returns:
Tuple of (chatbot history with greeting message, steamid for state, api_key for state, api_results_display)
"""
if not steamid or not steamid.strip():
return [], "", steam_api_key or "", "No API calls executed yet."
if not steam_api_key or not steam_api_key.strip():
return [[None, "Please provide your Steam Web API key. You can get one at https://steamcommunity.com/dev/apikey"]], steamid.strip(), "", "No API calls executed yet."
steamid = steamid.strip()
steam_api_key = steam_api_key.strip()
# Initialize Steam API with provided key
try:
steam_api = SteamAPI(api_key=steam_api_key)
except ValueError as e:
return [[None, f"I couldn't connect to the Steam API: {str(e)}\n\nPlease check your Steam API key."]], steamid, steam_api_key, "No API calls executed yet."
# Capture API calls made during greeting generation
api_results_raw = []
try:
# Get recently played games (same as in generate_greeting_with_ollama)
recent_result = steam_api.get_recently_played_games(steamid=steamid)
recent_response = recent_result.get("response", {})
recent_games = recent_response.get("games", [])
if recent_games:
# Store the API call result
api_results_raw.append({
"call": "GET_RECENTLY_PLAYED (for greeting)",
"endpoint": "GetRecentlyPlayedGames",
"raw_data": recent_result
})
# Get owned games to find total playtime
try:
owned_result = steam_api.get_owned_games(
steamid=steamid,
include_appinfo=False,
include_played_free_games=True
)
# Store the API call result
api_results_raw.append({
"call": "GET_OWNED_GAMES (for greeting)",
"endpoint": "GetOwnedGames",
"raw_data": owned_result
})
except Exception:
pass
except Exception:
pass
    # Generate greeting using the LLM (Ollama or Hugging Face)
greeting = generate_greeting_with_ollama(steamid, steam_api_key)
# Format API results for display
if api_results_raw:
api_results_display_text = format_api_results_for_display(api_results_raw)
else:
api_results_display_text = "No API calls executed yet."
return [[None, greeting]], steamid, steam_api_key, api_results_display_text
def parse_and_execute_api_calls(response_text: str, steamid: str, steam_api_key: str) -> Tuple[str, str, List[Dict[str, Any]]]:
"""
Parse API calls from Ollama response and execute them.
Args:
response_text: The response text that may contain API calls
steamid: User's Steam ID
steam_api_key: Steam Web API key
Returns:
Tuple of (cleaned_response_text, api_results_formatted, api_results_raw)
api_results_raw is a list of dicts with 'call', 'result', and 'raw_data' keys
"""
    # Pattern to match [API]...[/API] blocks (case-insensitive, spans newlines)
api_pattern = r'\[API\](.*?)\[/API\]'
# Find all API calls
api_calls = re.findall(api_pattern, response_text, re.DOTALL | re.IGNORECASE)
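    # Example (illustrative): a response such as
    #   "Sure! [API]GET_LIBRARY: userID [/API]"
    # yields one extracted call, "GET_LIBRARY: userID"; the block itself is
    # stripped from the user-facing text further below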
# Clean and deduplicate
seen = set()
unique_api_calls = []
for call in api_calls:
call_stripped = call.strip()
if call_stripped and call_stripped not in seen:
seen.add(call_stripped)
unique_api_calls.append(call_stripped)
api_calls = unique_api_calls
print(f"DEBUG parse_and_execute_api_calls: Found {len(api_calls)} API calls")
if api_calls:
print(f"DEBUG: API calls found: {api_calls}")
if not api_calls:
# No API calls found, return original text and empty results
return response_text, "", []
api_results = []
api_results_raw = []
for api_call in api_calls:
api_call = api_call.strip()
# Replace userID with actual steamid if present
if "userID" in api_call:
api_call = api_call.replace("userID", steamid)
print(f"DEBUG: Replaced userID with steamid in API call: {api_call[:100]}")
try:
# Parse GET_RECOMMENDATIONS: userID, 'survival', genre='horror'
if api_call.startswith("GET_RECOMMENDATIONS"):
# Extract parameters
params = api_call.replace("GET_RECOMMENDATIONS:", "").strip()
# Try to parse genre and other parameters
genre = None
category = None
# Look for genre parameter
genre_match = re.search(r"genre=['\"](\w+)['\"]", params)
if genre_match:
genre = genre_match.group(1)
# Look for category parameter
category_match = re.search(r"category=['\"](\w+)['\"]", params)
if category_match:
category = category_match.group(1)
# Execute recommendation
if steam_api_key:
try:
# Initialize Steam API with provided key
steam_api = SteamAPI(api_key=steam_api_key)
# Get raw API data first
recent_result = steam_api.get_recently_played_games(steamid=steamid)
owned_result = steam_api.get_owned_games(
steamid=steamid,
include_appinfo=True,
include_played_free_games=True
)
# Store raw API response
api_results_raw.append({
"call": api_call,
"endpoint": "GetRecentlyPlayedGames + GetOwnedGames",
"raw_data": {
"recently_played": recent_result,
"owned_games": owned_result
}
})
# Use what_to_play_today for formatted recommendations
recommendations = what_to_play_today(
steam_api,
steamid,
ollama_client,
OLLAMA_AVAILABLE,
OLLAMA_MODEL
)
api_results.append(f"GET_RECOMMENDATIONS result:\n{recommendations}")
except Exception as e:
api_results.append(f"GET_RECOMMENDATIONS error: {str(e)}")
api_results_raw.append({
"call": api_call,
"endpoint": "GetRecentlyPlayedGames + GetOwnedGames",
"error": str(e)
})
else:
api_results.append("GET_RECOMMENDATIONS error: Steam API not available")
api_results_raw.append({
"call": api_call,
"endpoint": "GetRecentlyPlayedGames + GetOwnedGames",
"error": "Steam API not available"
})
# Parse GET_LIBRARY: userID
elif api_call.startswith("GET_LIBRARY"):
if steam_api_key:
try:
steam_api = SteamAPI(api_key=steam_api_key)
owned_result = steam_api.get_owned_games(
steamid=steamid,
include_appinfo=True,
include_played_free_games=True
)
owned_response = owned_result.get("response", {})
owned_games = owned_response.get("games", [])
# Store raw API response
api_results_raw.append({
"call": api_call,
"endpoint": "GetOwnedGames",
"raw_data": owned_result
})
total_games = len(owned_games)
# Format games with all relevant data
game_list = []
game_list.append(f"User owns {total_games} games total. Sample games:\n")
for game in owned_games[:20]: # Show up to 20 games
appid = game.get("appid", "N/A")
name = game.get("name", "Unknown Game")
                        playtime_hours = game.get("playtime_forever", 0) / 60
                        game_list.append(f"• App ID: {appid} | Name: {name} | Total Playtime: {playtime_hours:.1f}h")
api_results.append("\n".join(game_list))
except Exception as e:
api_results.append(f"GET_LIBRARY error: {str(e)}")
api_results_raw.append({
"call": api_call,
"endpoint": "GetOwnedGames",
"error": str(e)
})
else:
api_results.append("GET_LIBRARY error: Steam API not available")
api_results_raw.append({
"call": api_call,
"endpoint": "GetOwnedGames",
"error": "Steam API not available"
})
# Parse GET_RECENT_GAMES or GET_RECENTLY_PLAYED: userID
elif api_call.startswith("GET_RECENT_GAMES") or api_call.startswith("GET_RECENTLY_PLAYED"):
if steam_api_key:
try:
steam_api = SteamAPI(api_key=steam_api_key)
recent_result = steam_api.get_recently_played_games(steamid=steamid)
recent_response = recent_result.get("response", {})
recent_games = recent_response.get("games", [])
# Store raw API response
api_results_raw.append({
"call": api_call,
"endpoint": "GetRecentlyPlayedGames",
"raw_data": recent_result
})
if recent_games:
# Format games with all relevant data
game_list = []
game_list.append(f"Found {len(recent_games)} recently played games:\n")
for game in recent_games[:10]: # Limit to 10
appid = game.get("appid", "N/A")
name = game.get("name", "Unknown Game")
                            playtime_2weeks_hours = game.get("playtime_2weeks", 0) / 60
                            playtime_forever_hours = game.get("playtime_forever", 0) / 60
                            game_list.append(f"• App ID: {appid} | Name: {name} | Total Playtime: {playtime_forever_hours:.1f}h | Recent (2 weeks): {playtime_2weeks_hours:.1f}h")
api_results.append("\n".join(game_list))
else:
api_results.append("GET_RECENT_GAMES result: No recently played games found.")
except Exception as e:
api_results.append(f"GET_RECENT_GAMES error: {str(e)}")
api_results_raw.append({
"call": api_call,
"endpoint": "GetRecentlyPlayedGames",
"error": str(e)
})
else:
api_results.append("GET_RECENT_GAMES error: Steam API not available")
api_results_raw.append({
"call": api_call,
"endpoint": "GetRecentlyPlayedGames",
"error": "Steam API not available"
})
else:
# Try to handle variations or unknown calls
api_results.append(f"Unknown API call: {api_call}")
api_results_raw.append({
"call": api_call,
"endpoint": "Unknown",
"error": f"Unknown API call format: {api_call}"
})
print(f"DEBUG: Unknown API call format: {api_call}")
except Exception as e:
error_msg = f"Error executing API call '{api_call}': {str(e)}"
api_results.append(error_msg)
api_results_raw.append({
"call": api_call,
"endpoint": "Unknown",
"error": str(e)
})
print(f"DEBUG: Error executing API call: {error_msg}")
# Remove API call blocks from response
cleaned_response = re.sub(api_pattern, '', response_text, flags=re.DOTALL | re.IGNORECASE).strip()
# Combine API results
api_results_text = "\n\n".join(api_results)
return cleaned_response, api_results_text, api_results_raw
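# Example (illustrative): for a response containing a single
# "[API]GET_LIBRARY: userID [/API]" block, this returns the text with the
# block removed, a formatted library summary, and a one-element list of
# raw-result dicts (call, endpoint, raw_data).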
def format_api_results_for_display(api_results_raw: List[Dict[str, Any]]) -> str:
"""
Format raw API results for display in the API results panel.
Displays games as a formatted list with appId, name, and playtime_forever.
Args:
api_results_raw: List of API call results with raw data
Returns:
Formatted string for display
"""
if not api_results_raw:
return "No API calls executed."
import json
formatted_output = []
for i, result in enumerate(api_results_raw, 1):
formatted_output.append(f"## API Call {i}")
formatted_output.append(f"**Call:** `{result.get('call', 'Unknown')}`")
formatted_output.append(f"**Endpoint:** `{result.get('endpoint', 'Unknown')}`")
if 'error' in result:
formatted_output.append(f"**Error:** {result['error']}")
elif 'raw_data' in result:
raw_data = result['raw_data']
# Try to extract and format games data
games_list = []
try:
# Handle different API response structures
if isinstance(raw_data, dict):
# Check for recently_played structure
if 'recently_played' in raw_data:
games_data = raw_data['recently_played'].get('response', {}).get('games', [])
games_list.extend(games_data)
# Check for owned_games structure
if 'owned_games' in raw_data:
games_data = raw_data['owned_games'].get('response', {}).get('games', [])
games_list.extend(games_data)
# Check for direct response.games structure
if 'response' in raw_data and 'games' in raw_data['response']:
games_data = raw_data['response']['games']
games_list.extend(games_data)
# If we found games, format them
if games_list:
formatted_output.append("**Games List:**")
formatted_output.append("")
formatted_output.append("| App ID | Name | Playtime 2 Weeks (hours) | Playtime Forever (hours) |")
formatted_output.append("|--------|------|------------------|------------------|")
games_list.sort(key=lambda x: x.get('playtime_2weeks', 0), reverse=True)
for game in games_list:
appid = game.get('appid', 'N/A')
name = game.get('name', 'Unknown')
playtime_2weeks_minutes = game.get('playtime_2weeks', 0)
playtime_forever_minutes = game.get('playtime_forever', 0)
playtime_2weeks_hours = playtime_2weeks_minutes / 60 if playtime_2weeks_minutes else 0
playtime_forever_hours = playtime_forever_minutes / 60 if playtime_forever_minutes else 0
formatted_output.append(f"| {appid} | {name} | {playtime_2weeks_hours:.1f} | {playtime_forever_hours:.1f} |")
formatted_output.append("")
formatted_output.append("**Raw JSON Response:**")
formatted_output.append("```json")
json_str = json.dumps(raw_data, indent=2, ensure_ascii=False)
# Truncate if too long (limit to 3000 characters for JSON)
if len(json_str) > 3000:
json_str = json_str[:3000] + "\n... (truncated - response too long)"
formatted_output.append(json_str)
formatted_output.append("```")
else:
# No games found, show raw JSON
formatted_output.append("**Raw API Response:**")
formatted_output.append("```json")
json_str = json.dumps(raw_data, indent=2, ensure_ascii=False)
if len(json_str) > 5000:
json_str = json_str[:5000] + "\n... (truncated - response too long)"
formatted_output.append(json_str)
formatted_output.append("```")
else:
formatted_output.append("**Raw API Response:**")
formatted_output.append(str(raw_data))
except Exception as e:
formatted_output.append(f"**Error formatting data:** {str(e)}")
formatted_output.append("**Raw API Response:**")
formatted_output.append("```json")
try:
json_str = json.dumps(raw_data, indent=2, ensure_ascii=False)
if len(json_str) > 5000:
json_str = json_str[:5000] + "\n... (truncated - response too long)"
formatted_output.append(json_str)
                except Exception:
formatted_output.append(str(raw_data))
formatted_output.append("```")
formatted_output.append("") # Empty line between calls
return "\n".join(formatted_output)
def detect_if_api_call_needed(message: str) -> Optional[str]:
"""
Detect if the user's message requires an API call and return the appropriate API call string.
Args:
message: User's message
Returns:
API call string if needed, None otherwise
"""
message_lower = message.lower()
# Check for recommendation requests (expanded patterns)
# Also check for genre exclusions or preferences (e.g., "not horror", "not action")
recommendation_patterns = [
'recommend', 'suggest', 'suggestion', 'what should i play', 'what to play',
'game suggestion', 'bored', 'what game', 'which game', 'help me choose',
'what can i play', 'what do you recommend', 'give me a game',
'pick a game', 'choose a game', 'find me a game', 'recommendation',
'suggest a game', 'recommend a game', 'what game should', 'which game should',
        'in the mood for', 'mood for', 'want a game', 'want to play',
        'looking for a game', 'need a game',
'feel like playing', 'want something', 'looking for something'
]
if any(pattern in message_lower for pattern in recommendation_patterns):
return "[API]GET_RECOMMENDATIONS: userID [/API]"
# Check for library requests
library_patterns = [
'library', 'games i own', 'my games', 'owned games', 'what games do i have',
'list my games', 'show my games', 'my library', 'games in my library'
]
if any(pattern in message_lower for pattern in library_patterns):
return "[API]GET_LIBRARY: userID [/API]"
# Check for recent games requests
recent_patterns = [
'recent', 'recently played', 'last played', 'what did i play',
'recent games', 'last games', 'what have i been playing'
]
if any(pattern in message_lower for pattern in recent_patterns):
return "[API]GET_RECENT_GAMES: userID [/API]"
return None
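# Example (illustrative):
#   detect_if_api_call_needed("what should i play tonight?")
#     -> "[API]GET_RECOMMENDATIONS: userID [/API]"
#   detect_if_api_call_needed("hi there") -> None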
def chat_with_bot(message: str, history: List[List[str]], steamid: str, steam_api_key: str, api_results_display: str) -> Tuple[List[List[str]], str, str]:
"""
Handle chat messages from the user using Ollama.
Args:
message: User's message
history: Chat history
steamid: User's Steam ID (from state)
        steam_api_key: Steam Web API key (from state)
        api_results_display: Current contents of the API results panel (returned unchanged on early exits)
Returns:
Tuple of (updated chat history, empty message string to clear input, api_results_display)
"""
if not message or not message.strip():
return history, "", api_results_display
if not steamid:
history.append([message, "I don't have your Steam ID. Please enter it first."])
return history, "", api_results_display
if not steam_api_key:
history.append([message, "I don't have your Steam API key. Please enter it first."])
return history, "", api_results_display
# Build context about user's Steam library for the assistant
context = ""
try:
if steam_api_key:
# Initialize Steam API with provided key
steam_api = SteamAPI(api_key=steam_api_key)
# Get basic info about user's library
recent_result = steam_api.get_recently_played_games(steamid=steamid)
recent_response = recent_result.get("response", {})
recent_games = recent_response.get("games", [])
if recent_games:
context = f"The user has {len(recent_games)} recently played games. "
context += f"Last played: {recent_games[0].get('name', 'Unknown')}. "
owned_result = steam_api.get_owned_games(
steamid=steamid,
include_appinfo=False,
include_played_free_games=True
)
owned_response = owned_result.get("response", {})
owned_games = owned_response.get("games", [])
if owned_games:
context += f"They own {len(owned_games)} games total. "
except Exception:
pass
# Add user message to history (will be updated with response)
history.append([message, None])
# Initialize API results display text
api_results_display_text = "No API calls executed yet."
# Generate response with LLM (Ollama or Hugging Face)
if OLLAMA_AVAILABLE or HF_INFERENCE_API_AVAILABLE:
try:
# Build messages for chat
messages = [
{
"role": "system",
"content": f"""You are a friendly, enthusiastic Steam gaming assistant. You help users discover games, get recommendations, and manage their Steam library.
User's Steam context: {context if context else "Limited information available."}
You can help with:
- Game recommendations based on their library
- Finding similar games
- Viewing recently played games
- Achievement tracking
- Game suggestions for what to play today
CRITICAL RULES - YOU MUST FOLLOW THESE EXACTLY:
1. RESPONSE LENGTH: Keep ALL responses SHORT - maximum 3-5 sentences. NO exceptions.
2. GAME DATA RESTRICTIONS:
- NEVER mention, suggest, or reference ANY game that is NOT explicitly listed in the API data provided to you
- ONLY use game names, App IDs, and playtime data from the API response
- If a game is not in the provided data, DO NOT mention it, suggest it, or reference it
- Do NOT hallucinate or invent game names
3. ANSWERING QUESTIONS PROPERLY:
- If asked "why", "motivate", "explain", or "reason" → Provide a brief explanation about WHY you suggested those games
- If asked to "list" or "show" games → Provide a formatted bullet list with game names and playtime.
- If asked to "suggest" → List 1-3 games from provided data and explain why you suggested them.
- Answer the user's question directly - don't just list games if they ask "why"
4. LIST FORMAT (when listing games):
- Game title (ONLY from provided data)
- Play time (if available in provided data)
- One line per game
Example format:
• Game Name 1 (5.2h)
• Game Name 2 (12.5h)
5. Be concise and direct.
CRITICAL API CALL RULES - YOU MUST FOLLOW THESE:
When the user asks for game recommendations, suggestions, or what to play, you MUST include an API call in your response. DO NOT ask follow-up questions. DO NOT ask for more details. Just execute the API call immediately.
API CALL FORMAT:
[API]GET_RECOMMENDATIONS: userID, genre='horror' [/API]
[API]GET_LIBRARY: userID [/API]
[API]GET_RECENT_GAMES: userID [/API]
WHEN TO USE API CALLS:
- User asks "what should I play", "recommend a game", "I'm bored", "suggest something", "what game", "which game", "suggest a game that is not X" → ALWAYS include [API]GET_RECOMMENDATIONS: userID [/API] in your response
- User says "I'm in the mood for X", "I want an X game", "looking for X game", "feel like playing X" → ALWAYS include [API]GET_RECOMMENDATIONS: userID [/API] in your response
- User asks about their library or owned games → ALWAYS include [API]GET_LIBRARY: userID [/API]
- User asks about recently played games → ALWAYS include [API]GET_RECENT_GAMES: userID [/API]
CRITICAL - DO NOT ASK QUESTIONS:
- NEVER ask "What genre do you prefer?" or "What type of game?" - just execute GET_RECOMMENDATIONS
- NEVER ask "What would you like to know?" - just execute the appropriate API call
- NEVER say "I can help you" or "I can provide recommendations" without immediately making an API call
- If user says "I'm in the mood for X", "I want an X game", "looking for X" → execute GET_RECOMMENDATIONS immediately
- If user gives ANY recommendation request (including genre preferences) → execute GET_RECOMMENDATIONS immediately, do NOT ask for more details
- NEVER respond with "Since you didn't specify" or similar - just execute the API call and provide recommendations
IMPORTANT:
- Include the API call syntax in your response when you need data
- The system will automatically execute the API call and give you the results
- After receiving API results, you will get a second chance to respond with the actual game data
- Do NOT include API call syntax in your final response to the user after you receive the data
Be conversational, helpful, and enthusiastic. When asked for recommendations, ALWAYS start with an API call. NEVER ask follow-up questions."""
}
]
# Add chat history (last 10 messages to keep context manageable)
# Filter out incomplete messages (where assistant_msg is None)
# Get all complete conversation pairs from history (excluding the current incomplete message)
            complete_history = history[:-1] if len(history) > 1 else []
            # Take last 10 complete pairs (20 messages total)
            print(f"DEBUG: Including {len(complete_history[-10:])} of {len(complete_history)} conversation pairs in history")
for user_msg, assistant_msg in complete_history[-10:]:
if user_msg:
messages.append({"role": "user", "content": user_msg})
if assistant_msg: # Only add assistant messages that are complete
messages.append({"role": "assistant", "content": assistant_msg})
print(f"DEBUG: Total messages sent to Ollama: {len(messages)}")
# Add current user message
messages.append({"role": "user", "content": message})
# Check if we should automatically add an API call based on user's request
auto_api_call = detect_if_api_call_needed(message)
# If we detected a need for an API call, execute it directly without waiting for model response
if auto_api_call:
print(f"DEBUG: Auto-detected API call needed: {auto_api_call}")
# Replace userID with actual steamid in the API call
api_call_with_id = auto_api_call.replace("userID", steamid)
# Execute the API call directly
assistant_response = api_call_with_id
else:
# Get response from Ollama only if no auto API call was detected
if ollama_client:
response = ollama_client.chat(
model=OLLAMA_MODEL,
messages=messages,
options={
"temperature": 0.7,
"num_predict": 150 # Limit response length to enforce short messages
}
)
else:
response = ollama.chat(
model=OLLAMA_MODEL,
messages=messages,
options={
"temperature": 0.7,
"num_predict": 150 # Limit response length to enforce short messages
}
)
assistant_response = response["message"]["content"]
                # If the model didn't include an [API] block there is nothing to
                # inject here: detect_if_api_call_needed(message) already returned
                # None for this message, so re-checking it would be a no-op
# Debug: Print the response to see if API calls are present
has_api_calls = '[API]' in assistant_response
print(f"DEBUG: Assistant response contains API calls: {has_api_calls}")
if has_api_calls:
print(f"DEBUG: Full response preview: {assistant_response[:500]}")
# Parse and execute API calls if any
cleaned_response, api_results, api_results_raw = parse_and_execute_api_calls(assistant_response, steamid, steam_api_key)
# Debug: Print what we found
print(f"DEBUG: Found {len(api_results_raw)} API calls")
if api_results_raw:
print(f"DEBUG: API calls: {[r.get('call', 'Unknown') for r in api_results_raw]}")
# Format API results for display
api_results_display_text = format_api_results_for_display(api_results_raw)
# If API calls were executed, get a final response from Ollama with the results
# Check if we have API results (even if empty, we still want to respond)
if api_results_raw:
# We have API call results (even if they're errors or empty)
# If api_results is empty but we have raw data, still generate a response
if not api_results:
# API call succeeded but returned no data - create a helpful message
api_results = ["No game data was returned from the API. This might mean your Steam profile is private or you have no games."]
# Build follow-up message with API results
# Include the full conversation history up to this point
follow_up_messages = [
{
"role": "system",
"content": f"""You are a friendly, enthusiastic Steam gaming assistant. You help users discover games, get recommendations, and manage their Steam library.
User's Steam context: {context if context else "Limited information available."}
You can help with:
- Game recommendations based on their library
- Finding similar games
- Viewing recently played games
- Achievement tracking
- Game suggestions for what to play today
CRITICAL RULES - YOU MUST FOLLOW THESE EXACTLY:
1. RESPONSE LENGTH: Keep ALL responses SHORT - maximum 3-5 sentences. NO exceptions.
2. GAME DATA RESTRICTIONS:
- NEVER mention, suggest, or reference ANY game that is NOT explicitly listed in the API data provided to you
- ONLY use game names, App IDs, and playtime data from the API response
- If a game is not in the provided data, DO NOT mention it, suggest it, or reference it
- Do NOT hallucinate or invent game names
3. ANSWERING QUESTIONS PROPERLY:
- If asked "why", "motivate", "explain", or "reason" → Provide a brief explanation about WHY you suggested those games
- If asked to "list" or "show" games → Provide a formatted bullet list with game names and playtime
- If asked to "suggest" → List 1-3 games from provided data and explain why you suggested them.
- Answer the user's question directly - don't just list games if they ask "why"
4. LIST FORMAT (when listing games):
- Game title (ONLY from provided data)
- Play time (if available in provided data)
- One line per game
Example format:
• Game Name 1 (5.2h)
• Game Name 2 (12.5h)
5. Be concise and direct.
Be conversational, helpful, and enthusiastic. Continue the conversation naturally based on the API results provided. Remember: SHORT responses only! ONLY mention games that are in the provided data!"""
}
]
# Add the conversation history (excluding the current incomplete message)
# Get all complete conversation pairs from history
complete_history = history[:-1] if len(history) > 1 else []
# Take last 10 complete pairs to keep context manageable
print(f"DEBUG: Follow-up - Including {len(complete_history)} conversation pairs in history")
for user_msg, assistant_msg in complete_history[-10:]:
if user_msg:
follow_up_messages.append({"role": "user", "content": user_msg})
if assistant_msg:
follow_up_messages.append({"role": "assistant", "content": assistant_msg})
print(f"DEBUG: Follow-up - Total messages sent to Ollama: {len(follow_up_messages)}")
# Add the current user message
follow_up_messages.append({"role": "user", "content": message})
# Add the assistant's initial response (with API calls removed)
if cleaned_response.strip():
follow_up_messages.append({
"role": "assistant",
"content": cleaned_response
})
# Parse genre exclusions from user message (e.g., "not horror", "not action")
excluded_genres = []
message_lower = message.lower()
genre_patterns = {
'horror': ['horror', 'scary', 'frightening', 'terror'],
'action': ['action', 'combat', 'fighting'],
'adventure': ['adventure', 'exploration'],
'rpg': ['rpg', 'role-playing', 'role playing'],
'strategy': ['strategy', 'tactical', 'tactics'],
'simulation': ['simulation', 'sim', 'simulator'],
'sports': ['sports', 'sport'],
'racing': ['racing', 'race', 'driving'],
'puzzle': ['puzzle', 'brain', 'logic'],
'indie': ['indie', 'independent'],
'casual': ['casual', 'relaxing'],
'shooter': ['shooter', 'fps', 'first-person', 'first person'],
'platformer': ['platformer', 'platform'],
'survival': ['survival', 'survive']
}
for genre, keywords in genre_patterns.items():
for keyword in keywords:
# Check for "not [genre]" patterns
if f'not {keyword}' in message_lower or f'no {keyword}' in message_lower or f'without {keyword}' in message_lower:
if genre not in excluded_genres:
excluded_genres.append(genre)
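                # Example (illustrative): "suggest something, but not horror"
                # contains 'not horror', so excluded_genres becomes ['horror']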
# Extract all games from the API results
all_games = []
for result in api_results_raw:
if 'raw_data' in result:
raw_data = result['raw_data']
# Extract games from various structures
games = []
if isinstance(raw_data, dict):
if 'recently_played' in raw_data:
games.extend(raw_data.get('recently_played', {}).get('response', {}).get('games', []))
if 'owned_games' in raw_data:
games.extend(raw_data.get('owned_games', {}).get('response', {}).get('games', []))
if 'response' in raw_data and 'games' in raw_data['response']:
games.extend(raw_data['response']['games'])
all_games.extend(games)
# Filter games by excluded genres if any
filtered_games = []
if excluded_genres and all_games and steam_store_api:
# Get app IDs for games
appids = [game.get('appid') for game in all_games if game.get('appid')]
if appids:
try:
# Fetch genre information from Steam Store API (batch in chunks of 20)
game_genres = {}
for i in range(0, len(appids), 20):
chunk = appids[i:i+20]
try:
details = steam_store_api.get_app_details(chunk)
for appid_str, app_data in details.items():
if app_data.get('success') and 'data' in app_data:
data = app_data['data']
# Extract genres from tags or categories
genres = []
if 'genres' in data:
genres = [g.get('description', '').lower() for g in data.get('genres', [])]
elif 'categories' in data:
# Sometimes genres are in categories
categories = [c.get('description', '').lower() for c in data.get('categories', [])]
genres = categories
game_genres[int(appid_str)] = genres
except Exception as e:
print(f"DEBUG: Error fetching genres for chunk: {e}")
continue
# Filter games based on excluded genres
for game in all_games:
appid = game.get('appid')
if appid in game_genres:
game_genre_list = game_genres[appid]
# Check if any excluded genre is in the game's genres
should_exclude = False
for excluded_genre in excluded_genres:
if any(excluded_genre in genre or genre in excluded_genre for genre in game_genre_list):
should_exclude = True
break
if not should_exclude:
filtered_games.append(game)
else:
# If we couldn't get genre info, include the game (better to show something than nothing)
filtered_games.append(game)
except Exception as e:
print(f"DEBUG: Error filtering games by genre: {e}")
# If filtering fails, use all games
filtered_games = all_games
else:
filtered_games = all_games
else:
filtered_games = all_games
# Extract game names for validation
game_names_in_data = set()
for game in filtered_games:
name = game.get('name', '').strip()
if name:
game_names_in_data.add(name)
games_list_str = ", ".join(sorted(game_names_in_data)) if game_names_in_data else "No games found"
# Regenerate formatted_api_data with filtered games if genre filtering was applied
if excluded_genres and filtered_games != all_games:
# Reformat the data to only include filtered games
filtered_api_results = []
for game in filtered_games[:20]: # Limit to 20 for display
appid = game.get('appid', 'N/A')
name = game.get('name', 'Unknown Game')
playtime_forever = game.get('playtime_forever', game.get('playtime_2weeks', 0)) / 60
filtered_api_results.append(f"• App ID: {appid} | Name: {name} | Total Playtime: {playtime_forever:.1f}h")
formatted_api_data = f"Found {len(filtered_games)} games (excluding {', '.join(excluded_genres)}):\n" + "\n".join(filtered_api_results)
else:
# Use original formatted data
formatted_api_data = "\n\n".join(api_results)
# Build a very explicit instruction message
instruction_text = f"""I executed the API calls you requested. Here is the game data:
{formatted_api_data}
CRITICAL INSTRUCTIONS - YOU MUST FOLLOW THESE EXACTLY:
1. ALLOWED GAMES LIST (ONLY mention these games):
{games_list_str}
2. STRICT RULES:
- You can ONLY mention games from the list above
- If a game is NOT in the list above, you CANNOT mention it, suggest it, or reference it
- If asked about a game not in the list, say: "I don't have information about that game in your library"
- Use EXACT game names from the data above (copy them exactly)
- Use EXACT playtime values from the data above
3. RESPONSE FORMAT:
- Keep responses SHORT (3-5 sentences maximum)
- If asked to list games, use bullet format: • Game Name (X.Xh total playtime)
- If asked to explain or motivate, provide a brief explanation (2-3 sentences)
- If asked to "suggest" or "recommend" → IMMEDIATELY provide 1-3 game suggestions from the allowed list
- If user mentions genre preferences (e.g., "not horror", "not action") → Filter suggestions accordingly but STILL provide recommendations
- Answer the user's question directly - don't just list games if they ask "why"
- NEVER ask follow-up questions - just provide recommendations from the available games
4. EXAMPLES:
- User asks "Suggest a game" → List 1-3 games from the allowed list with playtime
- User asks "Suggest a game that is not horror" → List 1-3 games from the allowed list that are NOT horror games
- User asks "Why did you suggest X?" → Explain your reasoning using only games from the allowed list
- User asks about a game not in the list → Say "I don't have information about that game in your library"
CRITICAL: When user asks for recommendations, DO NOT ask "What genre do you prefer?" or "What type of game?" - just provide suggestions from the available games immediately.
REMEMBER: The ONLY games that exist in this conversation are: {games_list_str}"""
follow_up_messages.append({
"role": "user",
"content": instruction_text
})
# Get final response from LLM (Ollama or Hugging Face)
assistant_response = chat_with_llm(
messages=follow_up_messages,
max_tokens=200,
temperature=0.7
)
# Clean any API call syntax that might have leaked into the final response
api_pattern = r'\[API\].*?\[/API\]'
assistant_response = re.sub(api_pattern, '', assistant_response, flags=re.DOTALL | re.IGNORECASE).strip()
# Keep the API results display from earlier
else:
# No API calls were executed or they failed
if not api_results_raw:
# No API calls were made at all
if cleaned_response and cleaned_response.strip():
assistant_response = cleaned_response
else:
# Fallback: generate a response even without API data
assistant_response = "I'm processing your request. Please wait a moment..."
api_results_display_text = "No API calls executed in this message."
else:
# API calls were made but no results (errors occurred)
if cleaned_response and cleaned_response.strip():
assistant_response = cleaned_response
else:
# Generate a helpful error message
error_messages = [r.get('error', 'Unknown error') for r in api_results_raw if 'error' in r]
if error_messages:
assistant_response = f"I encountered an error while fetching your game data: {error_messages[0]}. Please check your Steam API key and try again."
else:
assistant_response = "I couldn't retrieve your game data. Please make sure your Steam profile is public and your API key is correct."
# Ensure we always have a response
if not assistant_response or not assistant_response.strip():
assistant_response = "I'm having trouble processing your request. Please try again or check your Steam API key."
# Final cleanup: remove any API call syntax that might have leaked through (in all code paths)
api_pattern = r'\[API\].*?\[/API\]'
assistant_response = re.sub(api_pattern, '', assistant_response, flags=re.DOTALL | re.IGNORECASE).strip()
history[-1][1] = assistant_response
print(f"DEBUG: Final API results display text length: {len(api_results_display_text)}")
print(f"DEBUG: Final API results display preview: {api_results_display_text[:300]}")
except Exception as e:
error_msg = str(e)
# Extract the actual error - if it contains both Ollama and Hugging Face errors,
# prioritize showing the Hugging Face error since that's what we're actually using
if "Hugging Face" in error_msg or "HF_TOKEN" in error_msg:
# This is a Hugging Face error - show it directly
history[-1][1] = f"I encountered an error with the AI model: {error_msg}\n\nPlease check:\n- Your HF_TOKEN secret is set in Space settings\n- The token has proper permissions\n- The model ({HF_MODEL}) is accessible"
elif "Failed to connect to Ollama" in error_msg:
# Ollama failed, but we should have fallen back to Hugging Face
# If we're seeing this, it means Hugging Face also failed
# Try to extract any Hugging Face error from the message, or show a generic message
if "Hugging Face" in error_msg:
# There's a Hugging Face error in there, extract it
                    hf_error_part = error_msg.split("Hugging Face")[-1]
history[-1][1] = f"I encountered an error with the AI model: Hugging Face{hf_error_part}\n\nPlease check your HF_TOKEN secret in Space settings."
else:
# Only Ollama error shown, but Hugging Face must have failed silently
history[-1][1] = f"I encountered an error with the Hugging Face Inference API.\n\nPlease check:\n- Your HF_TOKEN secret is set in Space settings (Settings → Variables and secrets)\n- The token is valid and has proper permissions\n- The model ({HF_MODEL}) is accessible\n\nIf the issue persists, try setting a different model in HF_MODEL (e.g., 'mistralai/Mistral-7B-Instruct-v0.2')"
elif "Ollama" in error_msg and "Hugging Face" not in error_msg:
# Only Ollama mentioned, but we should be using Hugging Face
# This means Hugging Face failed without a clear error
history[-1][1] = f"I encountered an error with the Hugging Face Inference API.\n\nPlease check:\n- Your HF_TOKEN secret is set in Space settings\n- The token has proper permissions\n- The model ({HF_MODEL}) is accessible"
else:
# Regular error, show as-is
history[-1][1] = f"I encountered an error while processing your message: {error_msg}\n\nPlease try again or check your HF_TOKEN secret in Space settings."
api_results_display_text = "Error occurred during API call processing."
else:
history[-1][1] = "I'm sorry, but neither Ollama nor Hugging Face Inference API is available. Please ensure one of them is configured."
api_results_display_text = "LLM not available."
return history, "", api_results_display_text
# Create Gradio interface with Blocks for more control
with gr.Blocks(title="Steam Game Recommendations") as demo:
gr.Markdown("# 🎮 Steam Game Recommendations")
gr.Markdown("Enter your Steam ID and API key to get personalized game suggestions and view your recently played games.")
gr.Markdown("**Note:** This application will not store any user data acquired from the Steam API.")
# Store steamid and api_key in state
steamid_state = gr.State(value="")
steam_api_key_state = gr.State(value="")
with gr.Row():
steamid_input = gr.Textbox(
label="Steam ID",
placeholder="Enter 64-bit Steam ID (e.g., 76561198000000000)",
info="You can find your Steam ID at https://steamid.io/",
scale=1
)
steam_api_key_input = gr.Textbox(
label="Steam Web API Key",
placeholder="Enter your Steam Web API key",
info="Get your API key at https://steamcommunity.com/dev/apikey",
type="password",
scale=1
)
submit_btn = gr.Button("Submit", variant="primary", scale=1)
with gr.Row():
with gr.Column(scale=1):
chatbot = gr.Chatbot(
label="Chat with your Steam Assistant",
height=500,
show_copy_button=True
)
with gr.Column(scale=1):
api_results_display = gr.Markdown(
label="Steam API Results (Raw Data)",
value="API results will appear here when API calls are made.",
height=500
)
with gr.Row():
msg_input = gr.Textbox(
label="Message",
placeholder="Type your message here...",
scale=4,
container=False
)
send_btn = gr.Button("Send", variant="primary", scale=1)
# Auto-trigger when Steam ID is entered
steamid_input.submit(
fn=on_steamid_change,
inputs=[steamid_input, steam_api_key_input],
outputs=[chatbot, steamid_state, steam_api_key_state, api_results_display]
)
# Also trigger on button click
submit_btn.click(
fn=on_steamid_change,
inputs=[steamid_input, steam_api_key_input],
outputs=[chatbot, steamid_state, steam_api_key_state, api_results_display]
)
# Handle chat messages
def chat_wrapper(message, history, steamid, steam_api_key, api_display):
return chat_with_bot(message, history, steamid, steam_api_key, api_display)
msg_input.submit(
fn=chat_wrapper,
inputs=[msg_input, chatbot, steamid_state, steam_api_key_state, api_results_display],
outputs=[chatbot, msg_input, api_results_display]
)
send_btn.click(
fn=chat_wrapper,
inputs=[msg_input, chatbot, steamid_state, steam_api_key_state, api_results_display],
outputs=[chatbot, msg_input, api_results_display]
)
gr.Examples(
examples=[["76561198000000000"]],
inputs=[steamid_input]
)
if __name__ == "__main__":
demo.launch(mcp_server=True)