# config.py
# This is the central "control panel" for MudabbirAI.
# --- 1. MODEL CONFIGURATION ---
# Maps each LLM provider to the model IDs used by the pipeline. Only the
# "Gemini" provider distinguishes pipeline roles (judge / classifier /
# manager); every other provider exposes a single "default" model.
MODELS = {
    "Gemini": {
        "default": "gemini-2.0-flash",  # 1M-token context window
        "judge": "gemini-2.0-flash",
        "classifier": "gemini-2.0-flash",
        "manager": "gemini-2.0-flash",
    },
    "Anthropic": {
        "default": "claude-3-5-haiku-20241022",  # 200K-token context window
    },
    "SambaNova": {
        "default": "Llama-4-Maverick-17B-128E-Instruct",  # 512K-token context window
    },
    "OpenAI": {
        "default": "gpt-4o-mini",  # 128K-token context window
    },
    # Qwen served via the Nebius endpoint
    "Nebius": {
        "default": "Qwen/Qwen3-235B-A22B-Thinking-2507",  # 262K-token context window
    },
}
# --- 2. PRICING (USD per 1 Million Tokens) ---
# Per-provider token cost, split into input-token and output-token rates.
# SambaNova is listed at zero cost (free tier).
PRICING = {
    "Gemini":    {"input": 0.10, "output": 0.40},
    "Anthropic": {"input": 0.25, "output": 1.25},
    "SambaNova": {"input": 0.00, "output": 0.00},
    "OpenAI":    {"input": 0.15, "output": 0.60},
    # Qwen-on-Nebius rates
    "Nebius":    {"input": 0.20, "output": 0.60},
}
# --- 3. CALIBRATION CONFIGURATION ---
# For each team role under calibration: the persona key it is tested with
# ("roles_to_test") and the metric used to score its output ("role_metrics").
# NOTE(review): persona keys ("Culture_5", "Culture_11", ...) presumably
# index a persona table defined elsewhere in the project — confirm.
CALIBRATION_CONFIG = {
    "roles_to_test": {
        "Plant": "Culture_5",
        "Implementer": "Culture_Expert",
        "Monitor": "Culture_11",
    },
    "role_metrics": {
        "Plant": "Novelty",
        "Implementer": "Usefulness_Feasibility",
        "Monitor": "Cultural_Appropriateness",
    },
}
# --- 4. PROMPT FILE PATHS ---
# Relative paths (from the project root) to the prompt templates loaded by
# each pipeline component.
PROMPT_FILES = {
    "evaluator": "prompts/evaluator_judge.txt",
    "classifier": "prompts/classifier.txt",
    "manager_homogeneous": "prompts/manager_homogeneous.txt",
    "manager_heterogeneous": "prompts/manager_heterogeneous.txt",
}
# --- 5. STATIC CONFIGS ---
# Persona key used when no culture-specific persona is selected; matches the
# "Implementer" entry in CALIBRATION_CONFIG["roles_to_test"].
# Fix: removed a stray trailing "|" (copy/paste table artifact) that made
# this assignment a SyntaxError.
DEFAULT_PERSONA_KEY = "Culture_Expert"