|
|
import os |
|
|
import re |
|
|
import gradio as gr |
|
|
from typing import List, Tuple, Optional, Dict, Any |
|
|
from datetime import datetime |
|
|
from dotenv import load_dotenv |
|
|
from src.api.steam_api import SteamAPI |
|
|
from src.utils.game_suggestions import what_to_play_today |
|
|
from src.api.steam_store_api import SteamStoreAPI |
|
|
|
|
|
|
|
|
# Load configuration (tokens, model names, hosts) from a local .env file into os.environ.
load_dotenv()
|
|
|
|
|
|
|
|
|
|
|
# --- Optional Ollama backend -------------------------------------------------
# ollama_client stays None unless OLLAMA_HOST points at a reachable server;
# chat_with_llm() then uses the module-level ollama.chat() (default localhost)
# when OLLAMA_AVAILABLE is True but no explicit client was created.
ollama_client = None
try:
    import ollama
    OLLAMA_AVAILABLE = True

    # OLLAMA_HOST lets deployments target a non-default Ollama server.
    custom_host = os.getenv("OLLAMA_HOST")
    if custom_host:
        try:
            ollama_client = ollama.Client(host=custom_host)
            print(f"Ollama client initialized with host: {custom_host}")
        except Exception as e:
            # Best-effort: fall back to the implicit localhost connection.
            print(f"Warning: Could not create Ollama client with {custom_host}: {e}")
            print("Falling back to default localhost connection")
            ollama_client = None
except ImportError:
    OLLAMA_AVAILABLE = False
    print("Warning: Ollama not available. Install with: pip install ollama")
|
|
|
|
|
|
|
|
# Steam Store API wrapper; best-effort — the app keeps running with it set to
# None if construction fails.
try:
    steam_store_api = SteamStoreAPI()
except Exception as e:
    print(f"Warning: Could not initialize Steam Store API: {e}")
    steam_store_api = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Ollama model name; overridable via the OLLAMA_MODEL environment variable.
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "qwen2.5:7b")
|
|
|
|
|
|
|
|
# --- Hugging Face Inference API fallback backend -----------------------------
# Used by chat_with_llm() when Ollama is unavailable or fails. Initialization
# tries, in order: configured model with token -> configured model without
# token -> "gpt2" fallback. hf_client stays None if every attempt fails.
HF_INFERENCE_API_AVAILABLE = False
hf_client = None
HF_MODEL = None
try:
    from huggingface_hub import InferenceClient
    HF_INFERENCE_API_AVAILABLE = True

    # Chat-capable default; overridable via HF_MODEL.
    HF_MODEL = os.getenv("HF_MODEL", "mistralai/Mistral-7B-Instruct-v0.2")

    # Accept any of the common environment-variable spellings for the token.
    HF_TOKEN = (
        os.getenv("HF_TOKEN") or
        os.getenv("HUGGINGFACE_HUB_TOKEN") or
        os.getenv("HUGGING_FACE_HUB_TOKEN") or
        os.getenv("HF_API_TOKEN")
    )

    if HF_TOKEN:
        try:
            hf_client = InferenceClient(model=HF_MODEL, token=HF_TOKEN)
            print(f"Hugging Face Inference API initialized with model: {HF_MODEL} (using token)")
        except Exception as e:
            print(f"Warning: Could not initialize with token, trying without: {e}")
            # Token may be invalid; the model might still be publicly reachable.
            try:
                hf_client = InferenceClient(model=HF_MODEL)
                print(f"Hugging Face Inference API initialized with model: {HF_MODEL} (auto-detected)")
            except Exception as e2:
                print(f"Warning: Could not initialize Hugging Face Inference API: {e2}")
                hf_client = None
    else:
        # No token configured: try the model anonymously, then fall back to gpt2.
        try:
            hf_client = InferenceClient(model=HF_MODEL)
            print(f"Hugging Face Inference API initialized with model: {HF_MODEL} (no token, public model)")
        except Exception as e:
            print(f"Warning: Could not initialize without token: {e}")
            try:
                HF_MODEL = "gpt2"
                hf_client = InferenceClient(model=HF_MODEL)
                print(f"Hugging Face Inference API initialized with fallback model: {HF_MODEL}")
            except Exception as e2:
                print(f"Warning: Could not initialize Hugging Face Inference API with fallback: {e2}")
                hf_client = None
except ImportError:
    print("Warning: huggingface_hub not available. Install with: pip install huggingface_hub")
    hf_client = None
except Exception as e:
    print(f"Warning: Could not initialize Hugging Face Inference API: {e}")
    hf_client = None
|
|
|
|
|
def chat_with_llm(messages: List[Dict[str, str]], max_tokens: int = 200, temperature: float = 0.7) -> str:
    """
    Chat with LLM using Ollama if available, otherwise fallback to Hugging Face Inference API.

    Backend order: (1) Ollama (explicit client if configured, else default
    localhost), (2) Hugging Face chat_completion, (3) Hugging Face
    text_generation with a hand-built Mistral-style [INST] prompt. An Ollama
    failure is swallowed and falls through; a Hugging Face failure is
    re-raised (wrapped with a user-facing hint) for the caller to handle.

    Args:
        messages: List of message dicts with 'role' and 'content' keys
        max_tokens: Maximum tokens to generate
        temperature: Temperature for generation

    Returns:
        Response text from the LLM

    Raises:
        Exception: when no backend is available or the Hugging Face call fails.
    """
    # --- Backend 1: Ollama ---------------------------------------------------
    if OLLAMA_AVAILABLE:
        try:
            if ollama_client:
                # Explicit client (custom OLLAMA_HOST).
                response = ollama_client.chat(
                    model=OLLAMA_MODEL,
                    messages=messages,
                    options={
                        "temperature": temperature,
                        "num_predict": max_tokens
                    }
                )
            else:
                # Module-level call hits the default localhost server.
                response = ollama.chat(
                    model=OLLAMA_MODEL,
                    messages=messages,
                    options={
                        "temperature": temperature,
                        "num_predict": max_tokens
                    }
                )
            return response["message"]["content"]
        except Exception as e:
            # Deliberate fall-through to the Hugging Face backend.
            print(f"Warning: Ollama call failed: {e}, trying Hugging Face Inference API")

    # --- Backend 2/3: Hugging Face Inference API -----------------------------
    if HF_INFERENCE_API_AVAILABLE and hf_client:
        try:
            # Keep only the roles the chat API understands (system/user/assistant).
            chat_messages = []
            for msg in messages:
                role = msg.get("role", "user")
                content = msg.get("content", "")

                if role == "system":
                    chat_messages.append({"role": "system", "content": content})
                elif role == "user":
                    chat_messages.append({"role": "user", "content": content})
                elif role == "assistant":
                    chat_messages.append({"role": "assistant", "content": content})

            # Heuristic: instruction/chat-tuned models generally cannot serve
            # the plain text-generation fallback, so their errors are re-raised
            # instead of retried below.
            model_supports_only_conversational = (
                HF_MODEL and (
                    "mistral" in HF_MODEL.lower() or
                    "llama" in HF_MODEL.lower() or
                    "qwen" in HF_MODEL.lower() or
                    "chat" in HF_MODEL.lower() or
                    "instruct" in HF_MODEL.lower()
                )
            )

            try:
                # Bound the history: keep the system message (if first) plus
                # the 20 most recent turns.
                if len(chat_messages) > 21:
                    system_msg = chat_messages[0] if chat_messages[0].get("role") == "system" else None
                    recent_messages = chat_messages[-20:] if not system_msg else [system_msg] + chat_messages[-20:]
                    chat_messages = recent_messages
                    # NOTE(review): this debug line reads len() only after
                    # truncation, so the "from" count it reports is not the
                    # original message count — confirm and fix if relied upon.
                    print(f"DEBUG: Truncated conversation history from {len(chat_messages) + 1} to {len(chat_messages)} messages")

                print(f"DEBUG: Sending {len(chat_messages)} messages to Hugging Face API")
                response = hf_client.chat_completion(
                    messages=chat_messages,
                    max_tokens=max_tokens,
                    temperature=temperature
                )

                # Defensive extraction: different hub/server versions have
                # returned dict-shaped, object-shaped, or plain-string payloads.
                if isinstance(response, dict):
                    # OpenAI-style: choices[0].message.content
                    if "choices" in response and len(response["choices"]) > 0:
                        message = response["choices"][0].get("message", {})
                        if isinstance(message, dict):
                            return message.get("content", "").strip()
                        return str(message).strip()
                    # Single-message shape.
                    elif "message" in response:
                        msg_content = response["message"]
                        if isinstance(msg_content, dict):
                            return msg_content.get("content", "").strip()
                        return str(msg_content).strip()
                    # text-generation shape.
                    elif "generated_text" in response:
                        return response["generated_text"].strip()
                elif isinstance(response, str):
                    return response.strip()

                # Last resort: stringify whatever came back.
                return str(response).strip()
            except Exception as chat_error:
                error_str = str(chat_error).lower()
                error_full = str(chat_error)
                print(f"ERROR: Hugging Face chat_completion failed: {error_full}")
                print(f"ERROR: Error type: {type(chat_error)}")
                print(f"ERROR: Number of messages sent: {len(chat_messages)}")

                # Known terminal conditions: wrap with a user-facing hint and
                # re-raise (caught by the outer handler below).
                if "rate limit" in error_str or "429" in error_str or "quota" in error_str:
                    raise Exception(f"Rate limit exceeded. Please wait a moment and try again. Error: {error_full}")
                elif "timeout" in error_str or "timed out" in error_str:
                    raise Exception(f"Request timed out. The conversation might be too long. Please try a shorter message. Error: {error_full}")
                elif "context length" in error_str or "token" in error_str and "limit" in error_str:
                    raise Exception(f"Conversation too long. Please start a new conversation. Error: {error_full}")

                # Chat-tuned models cannot use the text-generation fallback.
                if model_supports_only_conversational:
                    print(f"Chat completion failed for conversational-only model ({HF_MODEL}): {chat_error}")
                    raise Exception(f"Hugging Face API error with {HF_MODEL}: {error_full}")

                if "conversational" in error_str or ("text-generation" in error_str and "not supported" in error_str):
                    print(f"Chat completion failed - model only supports conversational: {chat_error}")
                    raise chat_error

                print(f"Chat completion failed: {chat_error}, trying text generation as fallback")

                # --- Backend 3: raw text_generation fallback ----------------
                # Flatten the history into a single Mistral-style prompt.
                system_msg = ""
                conversation = []
                for msg in messages:
                    role = msg.get("role", "user")
                    content = msg.get("content", "")
                    if role == "system":
                        system_msg = content
                    elif role in ["user", "assistant"]:
                        conversation.append({"role": role, "content": content})

                if system_msg:
                    prompt = f"<s>[INST] {system_msg}\n\n"
                else:
                    prompt = "<s>[INST] "

                # Alternate user/assistant turns inside [INST] ... [/INST] tags.
                for i, msg in enumerate(conversation):
                    if msg["role"] == "user":
                        if i > 0:
                            prompt += " [/INST] "
                        prompt += msg["content"]
                    elif msg["role"] == "assistant":
                        prompt += " " + msg["content"]

                prompt += " [/INST]"

                response = hf_client.text_generation(
                    prompt,
                    max_new_tokens=max_tokens,
                    temperature=temperature,
                    return_full_text=False,
                    stop_sequences=["</s>", "[INST]"]
                )
                return response.strip()
        except Exception as e:
            # Outer handler: classify the failure and re-raise with guidance.
            print(f"Warning: Hugging Face Inference API call failed: {e}")
            error_details = str(e)
            if "api_key" in error_details.lower() or "token" in error_details.lower() or "authentication" in error_details.lower():
                raise Exception(f"Hugging Face Inference API authentication failed. Please check your HF_TOKEN secret in Space settings. Original error: {e}")
            elif "conversational" in error_details.lower() or "text-generation" in error_details.lower() or "not supported" in error_details.lower():
                raise Exception(f"Hugging Face model ({HF_MODEL}) compatibility error: {e}. The model may require a different API format or may not be accessible.")
            elif "rate limit" in error_details.lower() or "quota" in error_details.lower():
                raise Exception(f"Hugging Face Inference API rate limit exceeded: {e}. Please try again later or check your token permissions.")
            else:
                raise Exception(f"Hugging Face Inference API error: {e}")

    # No backend configured/usable at all.
    raise Exception("Neither Ollama nor Hugging Face Inference API is available. Please install one of them.")
|
|
|
|
|
|
|
|
def format_time_ago(timestamp: int) -> str:
    """
    Render a Unix timestamp as a coarse human-readable "time ago" phrase.

    Args:
        timestamp: Unix timestamp in seconds; falsy values (0/None) are
            treated as unknown.

    Returns:
        A phrase such as "2 hours ago", "3 days ago", "1 week ago",
        "just now", or "unknown time ago" for a falsy timestamp.
    """
    if not timestamp:
        return "unknown time ago"

    elapsed = datetime.now() - datetime.fromtimestamp(timestamp)
    days = elapsed.days

    # At least one full day old: report days, weeks, or months.
    if days > 0:
        if days == 1:
            return "1 day ago"
        if days < 7:
            return f"{days} days ago"
        if days < 30:
            weeks = days // 7
            return f"{weeks} week{'s' if weeks > 1 else ''} ago"
        months = days // 30
        return f"{months} month{'s' if months > 1 else ''} ago"

    # Same-day: fall back to hours, then minutes, then "just now".
    secs = elapsed.seconds
    if secs >= 3600:
        hours = secs // 3600
        return f"{hours} hour{'s' if hours > 1 else ''} ago"
    if secs >= 60:
        minutes = secs // 60
        return f"{minutes} minute{'s' if minutes > 1 else ''} ago"
    return "just now"
|
|
|
|
|
|
|
|
def format_playtime(minutes: int) -> str:
    """
    Format playtime in minutes as a human-readable string.

    Args:
        minutes: Playtime in minutes; falsy values (0/None) yield "0 minutes".

    Returns:
        Formatted string (e.g., "2.5 hours", "45 minutes", "1 hour")
    """
    if not minutes:
        return "0 minutes"

    if minutes < 60:
        return f"{minutes} minute{'s' if minutes != 1 else ''}"

    # minutes >= 60 here, so hours >= 1; the original's inner `hours < 1`
    # re-check was unreachable and has been removed.
    hours = minutes / 60
    if hours == int(hours):
        # Whole hours: show without a decimal point.
        return f"{int(hours)} hour{'s' if hours != 1 else ''}"
    return f"{hours:.1f} hours"
|
|
|
|
|
|
|
|
def generate_greeting_with_ollama(steamid: str, steam_api_key: str = None) -> str:
    """
    Generate a personalized greeting using the configured LLM backend based on
    the user's Steam data, with a static fallback if the LLM call fails.

    Args:
        steamid: The Steam ID of the user
        steam_api_key: Steam Web API key; required — a prompt to supply one is
            returned when missing.

    Returns:
        Greeting message (LLM-generated, or a hand-written fallback on any
        API/LLM error).
    """
    # Facts about the user's last played game, gathered for the LLM prompt.
    game_info = {}

    try:
        if not steam_api_key:
            return "Hello! 👋\n\nPlease provide your Steam Web API key to get started. You can get one at https://steamcommunity.com/dev/apikey"

        try:
            steam_api = SteamAPI(api_key=steam_api_key)
        except ValueError as e:
            return f"Hello! 👋\n\nI couldn't connect to the Steam API: {str(e)}\n\nPlease check your Steam API key."

        # Recently played games (empty when the profile is private or idle).
        recent_result = steam_api.get_recently_played_games(steamid=steamid)
        recent_response = recent_result.get("response", {})
        recent_games = recent_response.get("games", [])

        if not recent_games:
            game_info["has_recent_games"] = False
        else:
            # Most recently launched game wins.
            last_game = max(recent_games, key=lambda g: g.get("rtime_last_played", 0))

            game_info["has_recent_games"] = True
            game_info["game_name"] = last_game.get("name", "Unknown Game")
            game_info["rtime_last_played"] = last_game.get("rtime_last_played", 0)
            game_info["playtime_2weeks"] = last_game.get("playtime_2weeks", 0)

            # Prefer the all-time playtime from the owned-games list; fall back
            # to the two-week figure if that lookup fails.
            playtime_forever = game_info["playtime_2weeks"]
            try:
                owned_result = steam_api.get_owned_games(
                    steamid=steamid,
                    include_appinfo=False,
                    include_played_free_games=True
                )
                owned_response = owned_result.get("response", {})
                owned_games = owned_response.get("games", [])

                for game in owned_games:
                    if game.get("appid") == last_game.get("appid"):
                        playtime_forever = game.get("playtime_forever", game_info["playtime_2weeks"])
                        break
            except Exception:
                # Best-effort enrichment only; keep the two-week figure.
                pass

            game_info["playtime_forever"] = playtime_forever
            game_info["time_ago"] = format_time_ago(game_info["rtime_last_played"])
            game_info["total_playtime"] = format_playtime(playtime_forever)

    except Exception as e:
        return f"Hello! 👋\n\nI encountered an error while fetching your game data: {str(e)}\n\nPlease make sure your Steam ID is correct and your profile is set to public."

    # Build the LLM prompt from the gathered facts.
    if game_info.get("has_recent_games"):
        prompt = f"""You are a friendly Steam gaming assistant. Greet the user and tell them about their last played game.

User's last played game information:
- Game name: {game_info['game_name']}
- Last played: {game_info['time_ago']}
- Total playtime: {game_info['total_playtime']}

Write a warm, friendly greeting (2-3 sentences) that:
1. Greets them enthusiastically
2. Mentions the last game they played, when they played it, and how much time they've spent on it
3. Asks what you can do for them today

Be conversational, friendly, and enthusiastic. Use emojis sparingly. Don't be too formal."""
    else:
        prompt = """You are a friendly Steam gaming assistant. Greet the user.

The user doesn't have any recently played games visible (their profile might be private or they haven't played recently).

Write a warm, friendly greeting (2-3 sentences) that:
1. Greets them enthusiastically
2. Acknowledges that you couldn't see their recent games
3. Asks what you can do for them today

Be conversational, friendly, and enthusiastic. Use emojis sparingly."""

    try:
        greeting = chat_with_llm(
            messages=[
                {
                    "role": "system",
                    "content": "You are a friendly, enthusiastic Steam gaming assistant. You help users discover games and manage their Steam library."
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            max_tokens=200,
            temperature=0.8
        )
        return greeting
    except Exception as e:
        # LLM unavailable/failed: hand-written fallback greeting.
        if game_info.get("has_recent_games"):
            return f"Hello! 👋\n\nI see the last game you played was **{game_info['game_name']}**. You played it {game_info['time_ago']}, and you've spent a total of **{game_info['total_playtime']}** on it.\n\nWhat can I do for you today? I can help you find games to play, get recommendations, and much more! 🎮"
        else:
            return "Hello! 👋\n\nI couldn't find any recently played games in your Steam library. This might be because your profile is private or you haven't played any games recently.\n\nWhat can I do for you today? I can help you find games to play, get recommendations, and much more! 🎮"
    else:
        # NOTE(review): this try/else only runs when the try body finishes
        # without raising — but the try body always returns, so this branch
        # appears to be unreachable dead code duplicating the except fallback.
        # Confirm and consider removing.
        if game_info.get("has_recent_games"):
            return f"Hello! 👋\n\nI see the last game you played was **{game_info['game_name']}**. You played it {game_info['time_ago']}, and you've spent a total of **{game_info['total_playtime']}** on it.\n\nWhat can I do for you today? I can help you find games to play, get recommendations, and much more! 🎮"
        else:
            return "Hello! 👋\n\nI couldn't find any recently played games in your Steam library. This might be because your profile is private or you haven't played any games recently.\n\nWhat can I do for you today? I can help you find games to play, get recommendations, and much more! 🎮"
|
|
|
|
|
|
|
|
def on_steamid_change(steamid: str, steam_api_key: str) -> Tuple[List[List[str]], str, str, str]:
    """
    Called when Steam ID is provided. Opens a chatbot window with a
    personalized greeting and pre-populates the API-results panel with the
    raw responses fetched for that greeting.

    Args:
        steamid: Raw Steam ID text from the input field (may be empty/padded).
        steam_api_key: Raw Steam Web API key text (may be empty/padded).

    Returns:
        Tuple of (chatbot history with greeting message, steamid for state,
        api_key for state, api_results_display)
    """
    # Guard: no Steam ID yet — reset everything.
    if not steamid or not steamid.strip():
        return [], "", steam_api_key or "", "No API calls executed yet."

    # Guard: Steam ID present but no API key — keep the ID, ask for the key.
    if not steam_api_key or not steam_api_key.strip():
        return [[None, "Please provide your Steam Web API key. You can get one at https://steamcommunity.com/dev/apikey"]], steamid.strip(), "", "No API calls executed yet."

    steamid = steamid.strip()
    steam_api_key = steam_api_key.strip()

    try:
        steam_api = SteamAPI(api_key=steam_api_key)
    except ValueError as e:
        return [[None, f"I couldn't connect to the Steam API: {str(e)}\n\nPlease check your Steam API key."]], steamid, steam_api_key, "No API calls executed yet."

    # Raw responses captured here are shown in the API-results panel; the
    # fetches are best-effort and never block the greeting.
    api_results_raw = []

    try:
        recent_result = steam_api.get_recently_played_games(steamid=steamid)
        recent_response = recent_result.get("response", {})
        recent_games = recent_response.get("games", [])

        if recent_games:
            api_results_raw.append({
                "call": "GET_RECENTLY_PLAYED (for greeting)",
                "endpoint": "GetRecentlyPlayedGames",
                "raw_data": recent_result
            })

            try:
                owned_result = steam_api.get_owned_games(
                    steamid=steamid,
                    include_appinfo=False,
                    include_played_free_games=True
                )
                api_results_raw.append({
                    "call": "GET_OWNED_GAMES (for greeting)",
                    "endpoint": "GetOwnedGames",
                    "raw_data": owned_result
                })
            except Exception:
                # Panel data only — ignore failures.
                pass
    except Exception:
        # Panel data only — the greeting path re-fetches what it needs.
        pass

    # Note: this performs its own Steam API calls and never raises (returns a
    # fallback greeting string on error).
    greeting = generate_greeting_with_ollama(steamid, steam_api_key)

    if api_results_raw:
        api_results_display_text = format_api_results_for_display(api_results_raw)
    else:
        api_results_display_text = "No API calls executed yet."

    # [None, greeting] renders as a bot-only message in the Gradio chatbot.
    return [[None, greeting]], steamid, steam_api_key, api_results_display_text
|
|
|
|
|
|
|
|
def parse_and_execute_api_calls(response_text: str, steamid: str, steam_api_key: str) -> Tuple[str, str, List[Dict[str, Any]]]:
    """
    Parse API calls from Ollama response and execute them.

    The LLM embeds calls as [API]...[/API] markers. Recognized commands:
    GET_RECOMMENDATIONS, GET_LIBRARY, GET_RECENT_GAMES / GET_RECENTLY_PLAYED.
    The literal placeholder "userID" inside a call is replaced with the real
    Steam ID. Duplicate calls are executed once.

    Args:
        response_text: The response text that may contain API calls
        steamid: User's Steam ID
        steam_api_key: Steam Web API key

    Returns:
        Tuple of (cleaned_response_text, api_results_formatted, api_results_raw).
        cleaned_response_text has the [API] markers stripped;
        api_results_formatted is the human-readable summaries joined by blank
        lines; api_results_raw is a list of dicts with 'call' and 'endpoint'
        keys plus either 'raw_data' (success) or 'error' (failure).
    """
    # Markers may span lines; match case-insensitively.
    api_pattern = r'\[API\](.*?)\[/API\]'
    api_calls = re.findall(api_pattern, response_text, re.DOTALL | re.IGNORECASE)

    # De-duplicate while preserving order (the LLM sometimes repeats a call).
    seen = set()
    unique_api_calls = []
    for call in api_calls:
        call_stripped = call.strip()
        if call_stripped and call_stripped not in seen:
            seen.add(call_stripped)
            unique_api_calls.append(call_stripped)

    api_calls = unique_api_calls

    print(f"DEBUG parse_and_execute_api_calls: Found {len(api_calls)} API calls")
    if api_calls:
        print(f"DEBUG: API calls found: {api_calls}")

    if not api_calls:
        # Nothing to execute; return the text untouched.
        return response_text, "", []

    api_results = []       # human-readable summaries (fed back to the LLM)
    api_results_raw = []   # structured records for the API-results panel

    for api_call in api_calls:
        api_call = api_call.strip()

        # The prompt instructs the model to emit the placeholder "userID";
        # substitute the caller's real Steam ID before executing.
        if "userID" in api_call:
            api_call = api_call.replace("userID", steamid)
            print(f"DEBUG: Replaced userID with steamid in API call: {api_call[:100]}")

        try:
            if api_call.startswith("GET_RECOMMENDATIONS"):
                # Optional genre=/category= parameters after the colon.
                params = api_call.replace("GET_RECOMMENDATIONS:", "").strip()

                # NOTE(review): genre/category are parsed but never passed on
                # to what_to_play_today() below — confirm whether they should be.
                genre = None
                category = None

                genre_match = re.search(r"genre=['\"](\w+)['\"]", params)
                if genre_match:
                    genre = genre_match.group(1)

                category_match = re.search(r"category=['\"](\w+)['\"]", params)
                if category_match:
                    category = category_match.group(1)

                if steam_api_key:
                    try:
                        steam_api = SteamAPI(api_key=steam_api_key)

                        # Fetch both datasets used by the recommender so the
                        # panel can show the raw responses.
                        recent_result = steam_api.get_recently_played_games(steamid=steamid)
                        owned_result = steam_api.get_owned_games(
                            steamid=steamid,
                            include_appinfo=True,
                            include_played_free_games=True
                        )

                        api_results_raw.append({
                            "call": api_call,
                            "endpoint": "GetRecentlyPlayedGames + GetOwnedGames",
                            "raw_data": {
                                "recently_played": recent_result,
                                "owned_games": owned_result
                            }
                        })

                        # Delegate the actual suggestion logic to the helper.
                        recommendations = what_to_play_today(
                            steam_api,
                            steamid,
                            ollama_client,
                            OLLAMA_AVAILABLE,
                            OLLAMA_MODEL
                        )
                        api_results.append(f"GET_RECOMMENDATIONS result:\n{recommendations}")
                    except Exception as e:
                        api_results.append(f"GET_RECOMMENDATIONS error: {str(e)}")
                        api_results_raw.append({
                            "call": api_call,
                            "endpoint": "GetRecentlyPlayedGames + GetOwnedGames",
                            "error": str(e)
                        })
                else:
                    api_results.append("GET_RECOMMENDATIONS error: Steam API not available")
                    api_results_raw.append({
                        "call": api_call,
                        "endpoint": "GetRecentlyPlayedGames + GetOwnedGames",
                        "error": "Steam API not available"
                    })

            elif api_call.startswith("GET_LIBRARY"):
                if steam_api_key:
                    try:
                        steam_api = SteamAPI(api_key=steam_api_key)
                        owned_result = steam_api.get_owned_games(
                            steamid=steamid,
                            include_appinfo=True,
                            include_played_free_games=True
                        )
                        owned_response = owned_result.get("response", {})
                        owned_games = owned_response.get("games", [])

                        api_results_raw.append({
                            "call": api_call,
                            "endpoint": "GetOwnedGames",
                            "raw_data": owned_result
                        })

                        total_games = len(owned_games)

                        # Summarize: total count plus the first 20 games.
                        game_list = []
                        game_list.append(f"User owns {total_games} games total. Sample games:\n")
                        for game in owned_games[:20]:
                            appid = game.get("appid", "N/A")
                            name = game.get("name", "Unknown Game")
                            playtime_forever = game.get("playtime_forever", 0) / 60
                            game_list.append(f"• App ID: {appid} | Name: {name} | Total Playtime: {playtime_forever:.1f}h")

                        api_results.append("\n".join(game_list))
                    except Exception as e:
                        api_results.append(f"GET_LIBRARY error: {str(e)}")
                        api_results_raw.append({
                            "call": api_call,
                            "endpoint": "GetOwnedGames",
                            "error": str(e)
                        })
                else:
                    api_results.append("GET_LIBRARY error: Steam API not available")
                    api_results_raw.append({
                        "call": api_call,
                        "endpoint": "GetOwnedGames",
                        "error": "Steam API not available"
                    })

            elif api_call.startswith("GET_RECENT_GAMES") or api_call.startswith("GET_RECENTLY_PLAYED"):
                if steam_api_key:
                    try:
                        steam_api = SteamAPI(api_key=steam_api_key)
                        recent_result = steam_api.get_recently_played_games(steamid=steamid)
                        recent_response = recent_result.get("response", {})
                        recent_games = recent_response.get("games", [])

                        api_results_raw.append({
                            "call": api_call,
                            "endpoint": "GetRecentlyPlayedGames",
                            "raw_data": recent_result
                        })

                        if recent_games:
                            # Summarize up to the first 10 recent games.
                            game_list = []
                            game_list.append(f"Found {len(recent_games)} recently played games:\n")
                            for game in recent_games[:10]:
                                appid = game.get("appid", "N/A")
                                name = game.get("name", "Unknown Game")
                                playtime_2weeks = game.get("playtime_2weeks", 0) / 60
                                playtime_forever = game.get("playtime_forever", 0) / 60
                                game_list.append(f"• App ID: {appid} | Name: {name} | Total Playtime: {playtime_forever:.1f}h | Recent (2 weeks): {playtime_2weeks:.1f}h")

                            api_results.append("\n".join(game_list))
                        else:
                            api_results.append("GET_RECENT_GAMES result: No recently played games found.")
                    except Exception as e:
                        api_results.append(f"GET_RECENT_GAMES error: {str(e)}")
                        api_results_raw.append({
                            "call": api_call,
                            "endpoint": "GetRecentlyPlayedGames",
                            "error": str(e)
                        })
                else:
                    api_results.append("GET_RECENT_GAMES error: Steam API not available")
                    api_results_raw.append({
                        "call": api_call,
                        "endpoint": "GetRecentlyPlayedGames",
                        "error": "Steam API not available"
                    })

            else:
                # Unrecognized command: surface it rather than failing silently.
                api_results.append(f"Unknown API call: {api_call}")
                api_results_raw.append({
                    "call": api_call,
                    "endpoint": "Unknown",
                    "error": f"Unknown API call format: {api_call}"
                })
                print(f"DEBUG: Unknown API call format: {api_call}")

        except Exception as e:
            # Catch-all so one bad call doesn't abort the remaining ones.
            error_msg = f"Error executing API call '{api_call}': {str(e)}"
            api_results.append(error_msg)
            api_results_raw.append({
                "call": api_call,
                "endpoint": "Unknown",
                "error": str(e)
            })
            print(f"DEBUG: Error executing API call: {error_msg}")

    # Strip the [API] markers out of the user-facing text.
    cleaned_response = re.sub(api_pattern, '', response_text, flags=re.DOTALL | re.IGNORECASE).strip()

    api_results_text = "\n\n".join(api_results)

    return cleaned_response, api_results_text, api_results_raw
|
|
|
|
|
|
|
|
def format_api_results_for_display(api_results_raw: List[Dict[str, Any]]) -> str:
    """
    Format raw API results for display in the API results panel.

    Each entry becomes a markdown section showing the call, the endpoint, and
    either the error or the raw data. When the raw data contains Steam game
    lists, they are rendered as a markdown table (appid, name, two-week and
    all-time playtime in hours, most recently active first) followed by the
    (truncated) raw JSON.

    Args:
        api_results_raw: List of API call results; each dict carries 'call'
            and 'endpoint' keys plus either 'raw_data' (success) or 'error'.

    Returns:
        Markdown-formatted string for display.
    """
    if not api_results_raw:
        return "No API calls executed."

    import json
    formatted_output = []

    for i, result in enumerate(api_results_raw, 1):
        formatted_output.append(f"## API Call {i}")
        formatted_output.append(f"**Call:** `{result.get('call', 'Unknown')}`")
        formatted_output.append(f"**Endpoint:** `{result.get('endpoint', 'Unknown')}`")

        if 'error' in result:
            formatted_output.append(f"**Error:** {result['error']}")
        elif 'raw_data' in result:
            raw_data = result['raw_data']

            # Collect any game lists we can recognize in the payload.
            games_list = []

            try:
                if isinstance(raw_data, dict):
                    # GET_RECOMMENDATIONS bundles two raw API responses.
                    if 'recently_played' in raw_data:
                        games_list.extend(raw_data['recently_played'].get('response', {}).get('games', []))
                    if 'owned_games' in raw_data:
                        games_list.extend(raw_data['owned_games'].get('response', {}).get('games', []))

                    # Plain single Steam Web API response shape.
                    if 'response' in raw_data and 'games' in raw_data['response']:
                        games_list.extend(raw_data['response']['games'])

                    if games_list:
                        formatted_output.append("**Games List:**")
                        formatted_output.append("")
                        formatted_output.append("| App ID | Name | Playtime 2 Weeks (hours) | Playtime Forever (hours) |")
                        formatted_output.append("|--------|------|------------------|------------------|")

                        # Most recently active games first.
                        games_list.sort(key=lambda x: x.get('playtime_2weeks', 0), reverse=True)

                        for game in games_list:
                            appid = game.get('appid', 'N/A')
                            name = game.get('name', 'Unknown')
                            playtime_2weeks_minutes = game.get('playtime_2weeks', 0)
                            playtime_forever_minutes = game.get('playtime_forever', 0)
                            playtime_2weeks_hours = playtime_2weeks_minutes / 60 if playtime_2weeks_minutes else 0
                            playtime_forever_hours = playtime_forever_minutes / 60 if playtime_forever_minutes else 0
                            formatted_output.append(f"| {appid} | {name} | {playtime_2weeks_hours:.1f} | {playtime_forever_hours:.1f} |")

                        formatted_output.append("")
                        formatted_output.append("**Raw JSON Response:**")
                        formatted_output.append("```json")
                        json_str = json.dumps(raw_data, indent=2, ensure_ascii=False)
                        # Tighter cap here: the table already shows the games.
                        if len(json_str) > 3000:
                            json_str = json_str[:3000] + "\n... (truncated - response too long)"
                        formatted_output.append(json_str)
                        formatted_output.append("```")
                    else:
                        # Dict payload without recognizable game lists.
                        formatted_output.append("**Raw API Response:**")
                        formatted_output.append("```json")
                        json_str = json.dumps(raw_data, indent=2, ensure_ascii=False)
                        if len(json_str) > 5000:
                            json_str = json_str[:5000] + "\n... (truncated - response too long)"
                        formatted_output.append(json_str)
                        formatted_output.append("```")
                else:
                    # Non-dict payload: show its string form as-is.
                    formatted_output.append("**Raw API Response:**")
                    formatted_output.append(str(raw_data))
            except Exception as e:
                # Formatting failed (e.g. unexpected payload shape): report the
                # error, then best-effort dump the raw data.
                formatted_output.append(f"**Error formatting data:** {str(e)}")
                formatted_output.append("**Raw API Response:**")
                formatted_output.append("```json")
                try:
                    json_str = json.dumps(raw_data, indent=2, ensure_ascii=False)
                    if len(json_str) > 5000:
                        json_str = json_str[:5000] + "\n... (truncated - response too long)"
                    formatted_output.append(json_str)
                except Exception:
                    # Was a bare `except:` — narrowed so KeyboardInterrupt /
                    # SystemExit are no longer swallowed.
                    formatted_output.append(str(raw_data))
                formatted_output.append("```")

        # Blank separator between call sections.
        formatted_output.append("")

    return "\n".join(formatted_output)
|
|
|
|
|
|
|
|
def detect_if_api_call_needed(message: str) -> Optional[str]:
    """
    Detect if the user's message requires an API call and return the
    appropriate API call string.

    Intent is matched by substring against keyword groups, checked in priority
    order: recommendations, then library, then recently played.

    Args:
        message: User's message

    Returns:
        API call string (with the "userID" placeholder) if an intent matches,
        None otherwise
    """
    text = message.lower()

    # (keyword group, API call) pairs — order matters: first match wins.
    intent_table = (
        (
            (
                'recommend', 'suggest', 'suggestion', 'what should i play', 'what to play',
                'game suggestion', 'bored', 'what game', 'which game', 'help me choose',
                'what can i play', 'what do you recommend', 'give me a game',
                'pick a game', 'choose a game', 'find me a game', 'recommendation',
                'suggest a game', 'recommend a game', 'what game should', 'which game should',
                'in the mood for', 'mood for', 'want a game', 'want an game', 'want to play',
                'looking for a game', 'looking for an game', 'need a game', 'need an game',
                'feel like playing', 'want something', 'looking for something',
            ),
            "[API]GET_RECOMMENDATIONS: userID [/API]",
        ),
        (
            (
                'library', 'games i own', 'my games', 'owned games', 'what games do i have',
                'list my games', 'show my games', 'my library', 'games in my library',
            ),
            "[API]GET_LIBRARY: userID [/API]",
        ),
        (
            (
                'recent', 'recently played', 'last played', 'what did i play',
                'recent games', 'last games', 'what have i been playing',
            ),
            "[API]GET_RECENT_GAMES: userID [/API]",
        ),
    )

    for keywords, api_call in intent_table:
        if any(keyword in text for keyword in keywords):
            return api_call

    return None
|
|
|
|
|
|
|
|
def chat_with_bot(message: str, history: List[List[str]], steamid: str, steam_api_key: str, api_results_display: str) -> Tuple[List[List[str]], str, str]:
    """
    Handle chat messages from the user using Ollama.

    Flow: validate inputs -> fetch a short Steam-activity context string ->
    either auto-detect an [API]...[/API] call or ask the LLM for one ->
    execute any API calls -> send the results back to the LLM for a final,
    user-facing answer. Mutates `history` in place (appends the new turn,
    then fills in the assistant slot).

    Args:
        message: User's message
        history: Chat history
        steamid: User's Steam ID (from state)
        steam_api_key: Steam Web API key (from state)
        api_results_display: Current raw-API-results panel text; echoed back
            unchanged on early exits.

    Returns:
        Tuple of (updated chat history, empty message string to clear input, api_results_display)
    """
    # Ignore empty / whitespace-only submissions without touching history.
    if not message or not message.strip():
        return history, "", api_results_display

    # Both credentials are required before any Steam call can be made.
    if not steamid:
        history.append([message, "I don't have your Steam ID. Please enter it first."])
        return history, "", api_results_display

    if not steam_api_key:
        history.append([message, "I don't have your Steam API key. Please enter it first."])
        return history, "", api_results_display

    # Best-effort one-line summary of the user's Steam activity, used only to
    # enrich the system prompt. Failures are deliberately swallowed: context
    # is optional and the chat must still work without it.
    context = ""
    try:
        if steam_api_key:
            steam_api = SteamAPI(api_key=steam_api_key)

            recent_result = steam_api.get_recently_played_games(steamid=steamid)
            recent_response = recent_result.get("response", {})
            recent_games = recent_response.get("games", [])

            if recent_games:
                context = f"The user has {len(recent_games)} recently played games. "
                context += f"Last played: {recent_games[0].get('name', 'Unknown')}. "

            owned_result = steam_api.get_owned_games(
                steamid=steamid,
                include_appinfo=False,
                include_played_free_games=True
            )
            owned_response = owned_result.get("response", {})
            owned_games = owned_response.get("games", [])

            if owned_games:
                context += f"They own {len(owned_games)} games total. "
    except Exception:
        pass

    # Append the user turn now with an empty assistant slot; history[-1][1]
    # is filled in by every branch below before returning.
    history.append([message, None])

    api_results_display_text = "No API calls executed yet."

    if OLLAMA_AVAILABLE or HF_INFERENCE_API_AVAILABLE:
        try:
            # ---- First pass: system prompt instructing the model when to
            # emit [API]...[/API] call markers instead of free text. ----
            messages = [
                {
                    "role": "system",
                    "content": f"""You are a friendly, enthusiastic Steam gaming assistant. You help users discover games, get recommendations, and manage their Steam library.

User's Steam context: {context if context else "Limited information available."}

You can help with:
- Game recommendations based on their library
- Finding similar games
- Viewing recently played games
- Achievement tracking
- Game suggestions for what to play today

CRITICAL RULES - YOU MUST FOLLOW THESE EXACTLY:

1. RESPONSE LENGTH: Keep ALL responses SHORT - maximum 3-5 sentences. NO exceptions.

2. GAME DATA RESTRICTIONS:
- NEVER mention, suggest, or reference ANY game that is NOT explicitly listed in the API data provided to you
- ONLY use game names, App IDs, and playtime data from the API response
- If a game is not in the provided data, DO NOT mention it, suggest it, or reference it
- Do NOT hallucinate or invent game names

3. ANSWERING QUESTIONS PROPERLY:
- If asked "why", "motivate", "explain", or "reason" → Provide a brief explanation about WHY you suggested those games
- If asked to "list" or "show" games → Provide a formatted bullet list with game names and playtime.
- If asked to "suggest" → List 1-3 games from provided data and explain why you suggested them.
- Answer the user's question directly - don't just list games if they ask "why"

4. LIST FORMAT (when listing games):
- Game title (ONLY from provided data)
- Play time (if available in provided data)
- One line per game
Example format:
• Game Name 1 (5.2h)
• Game Name 2 (12.5h)

5. Be concise and direct.

CRITICAL API CALL RULES - YOU MUST FOLLOW THESE:

When the user asks for game recommendations, suggestions, or what to play, you MUST include an API call in your response. DO NOT ask follow-up questions. DO NOT ask for more details. Just execute the API call immediately.

API CALL FORMAT:
[API]GET_RECOMMENDATIONS: userID, genre='horror' [/API]
[API]GET_LIBRARY: userID [/API]
[API]GET_RECENT_GAMES: userID [/API]

WHEN TO USE API CALLS:
- User asks "what should I play", "recommend a game", "I'm bored", "suggest something", "what game", "which game", "suggest a game that is not X" → ALWAYS include [API]GET_RECOMMENDATIONS: userID [/API] in your response
- User says "I'm in the mood for X", "I want an X game", "looking for X game", "feel like playing X" → ALWAYS include [API]GET_RECOMMENDATIONS: userID [/API] in your response
- User asks about their library or owned games → ALWAYS include [API]GET_LIBRARY: userID [/API]
- User asks about recently played games → ALWAYS include [API]GET_RECENT_GAMES: userID [/API]

CRITICAL - DO NOT ASK QUESTIONS:
- NEVER ask "What genre do you prefer?" or "What type of game?" - just execute GET_RECOMMENDATIONS
- NEVER ask "What would you like to know?" - just execute the appropriate API call
- NEVER say "I can help you" or "I can provide recommendations" without immediately making an API call
- If user says "I'm in the mood for X", "I want an X game", "looking for X" → execute GET_RECOMMENDATIONS immediately
- If user gives ANY recommendation request (including genre preferences) → execute GET_RECOMMENDATIONS immediately, do NOT ask for more details
- NEVER respond with "Since you didn't specify" or similar - just execute the API call and provide recommendations

IMPORTANT:
- Include the API call syntax in your response when you need data
- The system will automatically execute the API call and give you the results
- After receiving API results, you will get a second chance to respond with the actual game data
- Do NOT include API call syntax in your final response to the user after you receive the data

Be conversational, helpful, and enthusiastic. When asked for recommendations, ALWAYS start with an API call. NEVER ask follow-up questions."""
                }
            ]

            # Replay prior turns (excluding the just-appended one) so the
            # model sees conversation context; capped at the last 10 pairs.
            complete_history = history[:-1] if len(history) > 1 else []

            print(f"DEBUG: Including {len(complete_history)} conversation pairs in history")
            for user_msg, assistant_msg in complete_history[-10:]:
                if user_msg:
                    messages.append({"role": "user", "content": user_msg})
                if assistant_msg:
                    messages.append({"role": "assistant", "content": assistant_msg})
            print(f"DEBUG: Total messages sent to Ollama: {len(messages)}")

            messages.append({"role": "user", "content": message})

            # Cheap keyword routing: if the message obviously needs an API
            # call, skip the first LLM round-trip entirely.
            auto_api_call = detect_if_api_call_needed(message)

            if auto_api_call:
                print(f"DEBUG: Auto-detected API call needed: {auto_api_call}")

                # Substitute the real Steam ID for the placeholder.
                api_call_with_id = auto_api_call.replace("userID", steamid)

                assistant_response = api_call_with_id
            else:
                # No keyword match: ask the LLM, which may emit an API call.
                # Prefer the configured client (custom OLLAMA_HOST), else the
                # module-level default (localhost).
                if ollama_client:
                    response = ollama_client.chat(
                        model=OLLAMA_MODEL,
                        messages=messages,
                        options={
                            "temperature": 0.7,
                            "num_predict": 150
                        }
                    )
                else:
                    response = ollama.chat(
                        model=OLLAMA_MODEL,
                        messages=messages,
                        options={
                            "temperature": 0.7,
                            "num_predict": 150
                        }
                    )

                assistant_response = response["message"]["content"]

                # Safety net: if the model ignored the prompt and produced no
                # API call, fall back to keyword detection and inject one.
                if '[API]' not in assistant_response:
                    auto_api_call = detect_if_api_call_needed(message)
                    if auto_api_call:
                        print(f"DEBUG: Ollama didn't include API call, injecting: {auto_api_call}")
                        api_call_with_id = auto_api_call.replace("userID", steamid)
                        assistant_response = api_call_with_id

            has_api_calls = '[API]' in assistant_response
            print(f"DEBUG: Assistant response contains API calls: {has_api_calls}")
            if has_api_calls:
                print(f"DEBUG: Full response preview: {assistant_response[:500]}")

            # Execute any [API]...[/API] markers; returns the response text
            # with markers stripped, human-readable result strings, and the
            # raw per-call result dicts.
            cleaned_response, api_results, api_results_raw = parse_and_execute_api_calls(assistant_response, steamid, steam_api_key)

            print(f"DEBUG: Found {len(api_results_raw)} API calls")
            if api_results_raw:
                print(f"DEBUG: API calls: {[r.get('call', 'Unknown') for r in api_results_raw]}")

            # Markdown for the raw-results side panel.
            api_results_display_text = format_api_results_for_display(api_results_raw)

            if api_results_raw:
                # ---- Second pass: feed the API results back to the LLM so
                # it can answer with real game data. ----
                if not api_results:
                    # Calls ran but produced nothing usable (private profile,
                    # empty library); tell the model explicitly.
                    api_results = ["No game data was returned from the API. This might mean your Steam profile is private or you have no games."]

                follow_up_messages = [
                    {
                        "role": "system",
                        "content": f"""You are a friendly, enthusiastic Steam gaming assistant. You help users discover games, get recommendations, and manage their Steam library.

User's Steam context: {context if context else "Limited information available."}

You can help with:
- Game recommendations based on their library
- Finding similar games
- Viewing recently played games
- Achievement tracking
- Game suggestions for what to play today

CRITICAL RULES - YOU MUST FOLLOW THESE EXACTLY:

1. RESPONSE LENGTH: Keep ALL responses SHORT - maximum 3-5 sentences. NO exceptions.

2. GAME DATA RESTRICTIONS:
- NEVER mention, suggest, or reference ANY game that is NOT explicitly listed in the API data provided to you
- ONLY use game names, App IDs, and playtime data from the API response
- If a game is not in the provided data, DO NOT mention it, suggest it, or reference it
- Do NOT hallucinate or invent game names

3. ANSWERING QUESTIONS PROPERLY:
- If asked "why", "motivate", "explain", or "reason" → Provide a brief explanation about WHY you suggested those games
- If asked to "list" or "show" games → Provide a formatted bullet list with game names and playtime
- If asked to "suggest" → List 1-3 games from provided data and explain why you suggested them.
- Answer the user's question directly - don't just list games if they ask "why"

4. LIST FORMAT (when listing games):
- Game title (ONLY from provided data)
- Play time (if available in provided data)
- One line per game
Example format:
• Game Name 1 (5.2h)
• Game Name 2 (12.5h)

5. Be concise and direct.

Be conversational, helpful, and enthusiastic. Continue the conversation naturally based on the API results provided. Remember: SHORT responses only! ONLY mention games that are in the provided data!"""
                    }
                ]

                # Replay the same capped history for the follow-up call.
                complete_history = history[:-1] if len(history) > 1 else []

                print(f"DEBUG: Follow-up - Including {len(complete_history)} conversation pairs in history")
                for user_msg, assistant_msg in complete_history[-10:]:
                    if user_msg:
                        follow_up_messages.append({"role": "user", "content": user_msg})
                    if assistant_msg:
                        follow_up_messages.append({"role": "assistant", "content": assistant_msg})
                print(f"DEBUG: Follow-up - Total messages sent to Ollama: {len(follow_up_messages)}")

                follow_up_messages.append({"role": "user", "content": message})

                # Include any non-API text the model produced in pass one.
                if cleaned_response.strip():
                    follow_up_messages.append({
                        "role": "assistant",
                        "content": cleaned_response
                    })

                # Parse negative genre requests ("not horror", "no racing",
                # "without puzzle") out of the user's message.
                excluded_genres = []
                message_lower = message.lower()
                genre_patterns = {
                    'horror': ['horror', 'scary', 'frightening', 'terror'],
                    'action': ['action', 'combat', 'fighting'],
                    'adventure': ['adventure', 'exploration'],
                    'rpg': ['rpg', 'role-playing', 'role playing'],
                    'strategy': ['strategy', 'tactical', 'tactics'],
                    'simulation': ['simulation', 'sim', 'simulator'],
                    'sports': ['sports', 'sport'],
                    'racing': ['racing', 'race', 'driving'],
                    'puzzle': ['puzzle', 'brain', 'logic'],
                    'indie': ['indie', 'independent'],
                    'casual': ['casual', 'relaxing'],
                    'shooter': ['shooter', 'fps', 'first-person', 'first person'],
                    'platformer': ['platformer', 'platform'],
                    'survival': ['survival', 'survive']
                }

                for genre, keywords in genre_patterns.items():
                    for keyword in keywords:
                        # Substring match, so "no action" in "no action-packed" also hits.
                        if f'not {keyword}' in message_lower or f'no {keyword}' in message_lower or f'without {keyword}' in message_lower:
                            if genre not in excluded_genres:
                                excluded_genres.append(genre)

                # Flatten game lists out of the raw API results; each result
                # may nest games under several known shapes.
                all_games = []
                for result in api_results_raw:
                    if 'raw_data' in result:
                        raw_data = result['raw_data']

                        games = []
                        if isinstance(raw_data, dict):
                            if 'recently_played' in raw_data:
                                games.extend(raw_data.get('recently_played', {}).get('response', {}).get('games', []))
                            if 'owned_games' in raw_data:
                                games.extend(raw_data.get('owned_games', {}).get('response', {}).get('games', []))
                            if 'response' in raw_data and 'games' in raw_data['response']:
                                games.extend(raw_data['response']['games'])
                        all_games.extend(games)

                # Drop games in excluded genres, using Steam Store metadata.
                # On any failure, fall back to the unfiltered list rather
                # than returning nothing.
                filtered_games = []
                if excluded_genres and all_games and steam_store_api:
                    appids = [game.get('appid') for game in all_games if game.get('appid')]
                    if appids:
                        try:
                            # Fetch genre data in chunks of 20 app IDs.
                            game_genres = {}
                            for i in range(0, len(appids), 20):
                                chunk = appids[i:i+20]
                                try:
                                    details = steam_store_api.get_app_details(chunk)
                                    for appid_str, app_data in details.items():
                                        if app_data.get('success') and 'data' in app_data:
                                            data = app_data['data']

                                            genres = []
                                            if 'genres' in data:
                                                genres = [g.get('description', '').lower() for g in data.get('genres', [])]
                                            elif 'categories' in data:
                                                # Fall back to categories when no genre list exists.
                                                categories = [c.get('description', '').lower() for c in data.get('categories', [])]
                                                genres = categories

                                            game_genres[int(appid_str)] = genres
                                except Exception as e:
                                    print(f"DEBUG: Error fetching genres for chunk: {e}")
                                    continue

                            # Keep a game unless one of its genres overlaps an
                            # excluded genre (bidirectional substring match,
                            # e.g. "action" vs "action rpg").
                            for game in all_games:
                                appid = game.get('appid')
                                if appid in game_genres:
                                    game_genre_list = game_genres[appid]

                                    should_exclude = False
                                    for excluded_genre in excluded_genres:
                                        if any(excluded_genre in genre or genre in excluded_genre for genre in game_genre_list):
                                            should_exclude = True
                                            break
                                    if not should_exclude:
                                        filtered_games.append(game)
                                else:
                                    # No genre info fetched: keep the game.
                                    filtered_games.append(game)
                        except Exception as e:
                            print(f"DEBUG: Error filtering games by genre: {e}")

                            filtered_games = all_games
                    else:
                        filtered_games = all_games
                else:
                    filtered_games = all_games

                # Allow-list of exact game names the model may mention.
                game_names_in_data = set()
                for game in filtered_games:
                    name = game.get('name', '').strip()
                    if name:
                        game_names_in_data.add(name)

                games_list_str = ", ".join(sorted(game_names_in_data)) if game_names_in_data else "No games found"

                # If genre filtering actually removed anything, rebuild the
                # data section from the filtered list (capped at 20 games);
                # otherwise reuse the pre-formatted API result strings.
                if excluded_genres and filtered_games != all_games:
                    filtered_api_results = []
                    for game in filtered_games[:20]:
                        appid = game.get('appid', 'N/A')
                        name = game.get('name', 'Unknown Game')
                        # playtime_forever is minutes; convert to hours.
                        playtime_forever = game.get('playtime_forever', game.get('playtime_2weeks', 0)) / 60
                        filtered_api_results.append(f"• App ID: {appid} | Name: {name} | Total Playtime: {playtime_forever:.1f}h")
                    formatted_api_data = f"Found {len(filtered_games)} games (excluding {', '.join(excluded_genres)}):\n" + "\n".join(filtered_api_results)
                else:
                    formatted_api_data = "\n\n".join(api_results)

                instruction_text = f"""I executed the API calls you requested. Here is the game data:

{formatted_api_data}

CRITICAL INSTRUCTIONS - YOU MUST FOLLOW THESE EXACTLY:

1. ALLOWED GAMES LIST (ONLY mention these games):
{games_list_str}

2. STRICT RULES:
- You can ONLY mention games from the list above
- If a game is NOT in the list above, you CANNOT mention it, suggest it, or reference it
- If asked about a game not in the list, say: "I don't have information about that game in your library"
- Use EXACT game names from the data above (copy them exactly)
- Use EXACT playtime values from the data above

3. RESPONSE FORMAT:
- Keep responses SHORT (3-5 sentences maximum)
- If asked to list games, use bullet format: • Game Name (X.Xh total playtime)
- If asked to explain or motivate, provide a brief explanation (2-3 sentences)
- If asked to "suggest" or "recommend" → IMMEDIATELY provide 1-3 game suggestions from the allowed list
- If user mentions genre preferences (e.g., "not horror", "not action") → Filter suggestions accordingly but STILL provide recommendations
- Answer the user's question directly - don't just list games if they ask "why"
- NEVER ask follow-up questions - just provide recommendations from the available games

4. EXAMPLES:
- User asks "Suggest a game" → List 1-3 games from the allowed list with playtime
- User asks "Suggest a game that is not horror" → List 1-3 games from the allowed list that are NOT horror games
- User asks "Why did you suggest X?" → Explain your reasoning using only games from the allowed list
- User asks about a game not in the list → Say "I don't have information about that game in your library"

CRITICAL: When user asks for recommendations, DO NOT ask "What genre do you prefer?" or "What type of game?" - just provide suggestions from the available games immediately.

REMEMBER: The ONLY games that exist in this conversation are: {games_list_str}"""

                follow_up_messages.append({
                    "role": "user",
                    "content": instruction_text
                })

                # chat_with_llm (defined elsewhere) routes to Ollama or the
                # HF Inference API, whichever is configured.
                assistant_response = chat_with_llm(
                    messages=follow_up_messages,
                    max_tokens=200,
                    temperature=0.7
                )

                # Strip any API markers the model echoed back.
                api_pattern = r'\[API\].*?\[/API\]'
                assistant_response = re.sub(api_pattern, '', assistant_response, flags=re.DOTALL | re.IGNORECASE).strip()

            else:
                # No raw API results: either no calls were requested, or the
                # calls all failed before producing data.
                if not api_results_raw:
                    # No API calls in this turn: use the model's plain reply.
                    if cleaned_response and cleaned_response.strip():
                        assistant_response = cleaned_response
                    else:
                        # Model produced nothing usable either.
                        assistant_response = "I'm processing your request. Please wait a moment..."
                    api_results_display_text = "No API calls executed in this message."
                else:
                    # NOTE(review): unreachable — this `else` requires
                    # api_results_raw truthy, but the outer branch already
                    # handled that case. Kept as-is (comments-only edit).
                    if cleaned_response and cleaned_response.strip():
                        assistant_response = cleaned_response
                    else:
                        # Surface the first API error to the user, if any.
                        error_messages = [r.get('error', 'Unknown error') for r in api_results_raw if 'error' in r]
                        if error_messages:
                            assistant_response = f"I encountered an error while fetching your game data: {error_messages[0]}. Please check your Steam API key and try again."
                        else:
                            assistant_response = "I couldn't retrieve your game data. Please make sure your Steam profile is public and your API key is correct."

            # Final fallback so the chat never shows an empty assistant turn.
            if not assistant_response or not assistant_response.strip():
                assistant_response = "I'm having trouble processing your request. Please try again or check your Steam API key."

            # Defensive second strip of API markers before display.
            api_pattern = r'\[API\].*?\[/API\]'
            assistant_response = re.sub(api_pattern, '', assistant_response, flags=re.DOTALL | re.IGNORECASE).strip()

            # Fill the assistant slot of the turn appended earlier.
            history[-1][1] = assistant_response
            print(f"DEBUG: Final API results display text length: {len(api_results_display_text)}")
            print(f"DEBUG: Final API results display preview: {api_results_display_text[:300]}")

        except Exception as e:
            error_msg = str(e)

            # Map the error text onto a user-facing explanation. Branches are
            # matched on substrings of the raised message.
            if "Hugging Face" in error_msg or "HF_TOKEN" in error_msg:
                history[-1][1] = f"I encountered an error with the AI model: {error_msg}\n\nPlease check:\n- Your HF_TOKEN secret is set in Space settings\n- The token has proper permissions\n- The model ({HF_MODEL}) is accessible"
            elif "Failed to connect to Ollama" in error_msg:
                # Ollama unreachable: advise on the HF fallback instead.
                # NOTE(review): this inner "Hugging Face" check can never be
                # true — the first branch above already captured those
                # messages. Kept as-is (comments-only edit).
                if "Hugging Face" in error_msg:
                    hf_error_part = error_msg.split("Hugging Face")[-1] if "Hugging Face" in error_msg else error_msg
                    history[-1][1] = f"I encountered an error with the AI model: Hugging Face{hf_error_part}\n\nPlease check your HF_TOKEN secret in Space settings."
                else:
                    history[-1][1] = f"I encountered an error with the Hugging Face Inference API.\n\nPlease check:\n- Your HF_TOKEN secret is set in Space settings (Settings → Variables and secrets)\n- The token is valid and has proper permissions\n- The model ({HF_MODEL}) is accessible\n\nIf the issue persists, try setting a different model in HF_MODEL (e.g., 'mistralai/Mistral-7B-Instruct-v0.2')"
            elif "Ollama" in error_msg and "Hugging Face" not in error_msg:
                history[-1][1] = f"I encountered an error with the Hugging Face Inference API.\n\nPlease check:\n- Your HF_TOKEN secret is set in Space settings\n- The token has proper permissions\n- The model ({HF_MODEL}) is accessible"
            else:
                history[-1][1] = f"I encountered an error while processing your message: {error_msg}\n\nPlease try again or check your HF_TOKEN secret in Space settings."
            api_results_display_text = "Error occurred during API call processing."
    else:
        # Neither LLM backend is configured/importable.
        history[-1][1] = "I'm sorry, but neither Ollama nor Hugging Face Inference API is available. Please ensure one of them is configured."
        api_results_display_text = "LLM not available."

    return history, "", api_results_display_text
|
|
|
|
|
|
|
|
|
|
|
# --- Gradio UI -------------------------------------------------------------
# Layout: credential inputs on top, chat panel (left) + raw API results
# panel (right) below, then the message input row. Event handlers are
# registered after all components exist.
with gr.Blocks(title="Steam Game Recommendations") as demo:
    gr.Markdown("# 🎮 Steam Game Recommendations")
    gr.Markdown("Enter your Steam ID and API key to get personalized game suggestions and view your recently played games.")
    gr.Markdown("**Note:** This application will not store any user data acquired from the Steam API.")

    # Per-session state holding the submitted credentials (not persisted).
    steamid_state = gr.State(value="")
    steam_api_key_state = gr.State(value="")

    with gr.Row():
        steamid_input = gr.Textbox(
            label="Steam ID",
            placeholder="Enter 64-bit Steam ID (e.g., 76561198000000000)",
            info="You can find your Steam ID at https://steamid.io/",
            scale=1
        )
        steam_api_key_input = gr.Textbox(
            label="Steam Web API Key",
            placeholder="Enter your Steam Web API key",
            info="Get your API key at https://steamcommunity.com/dev/apikey",
            type="password",  # mask the key in the UI
            scale=1
        )
        submit_btn = gr.Button("Submit", variant="primary", scale=1)

    with gr.Row():
        with gr.Column(scale=1):
            chatbot = gr.Chatbot(
                label="Chat with your Steam Assistant",
                height=500,
                show_copy_button=True
            )

        with gr.Column(scale=1):
            # Read-only panel mirroring the raw Steam API responses made
            # during the conversation.
            api_results_display = gr.Markdown(
                label="Steam API Results (Raw Data)",
                value="API results will appear here when API calls are made.",
                height=500
            )

    with gr.Row():
        msg_input = gr.Textbox(
            label="Message",
            placeholder="Type your message here...",
            scale=4,
            container=False
        )
        send_btn = gr.Button("Send", variant="primary", scale=1)

    # Pressing Enter in the Steam ID box and clicking Submit both run
    # on_steamid_change (defined elsewhere in this file) to validate and
    # store the credentials.
    steamid_input.submit(
        fn=on_steamid_change,
        inputs=[steamid_input, steam_api_key_input],
        outputs=[chatbot, steamid_state, steam_api_key_state, api_results_display]
    )

    submit_btn.click(
        fn=on_steamid_change,
        inputs=[steamid_input, steam_api_key_input],
        outputs=[chatbot, steamid_state, steam_api_key_state, api_results_display]
    )

    # Thin adapter matching the event-wiring argument order to
    # chat_with_bot's signature.
    def chat_wrapper(message, history, steamid, steam_api_key, api_display):
        return chat_with_bot(message, history, steamid, steam_api_key, api_display)

    # Enter key in the message box and the Send button share one handler;
    # chat_with_bot returns "" as its second output to clear msg_input.
    msg_input.submit(
        fn=chat_wrapper,
        inputs=[msg_input, chatbot, steamid_state, steam_api_key_state, api_results_display],
        outputs=[chatbot, msg_input, api_results_display]
    )

    send_btn.click(
        fn=chat_wrapper,
        inputs=[msg_input, chatbot, steamid_state, steam_api_key_state, api_results_display],
        outputs=[chatbot, msg_input, api_results_display]
    )

    # Clickable example Steam ID for quick manual testing.
    gr.Examples(
        examples=[["76561198000000000"]],
        inputs=[steamid_input]
    )


if __name__ == "__main__":
    # NOTE(review): mcp_server=True presumably exposes the app's functions
    # over MCP as well — confirm the installed Gradio version supports it.
    demo.launch(mcp_server=True)
|
|
|