Spaces:
Running
Running
add minimax 2.5
Browse files
- backend_api.py +3 -2
- backend_models.py +5 -1
- frontend/src/app/page.tsx +1 -1
- frontend/src/components/ControlPanel.tsx +1 -1
- frontend/src/components/LandingPage.tsx +6 -6
backend_api.py
CHANGED
|
@@ -100,7 +100,8 @@ def get_cached_client(model_id: str, provider: str = "auto"):
|
|
| 100 |
|
| 101 |
# Define models and languages here to avoid importing Gradio UI
|
| 102 |
AVAILABLE_MODELS = [
|
| 103 |
- {"name": "MiniMax-M2.1 🤖", "id": "MiniMaxAI/MiniMax-M2.1", "description": "MiniMax-M2.1 - Latest powerful coder model via HuggingFace Router with Novita provider (Default)", "supports_images": False},  <!-- removed line truncated in extraction; reconstructed from the old MiniMax default — verify against the original commit -->
|
|
|
|
| 104 |
{"name": "Qwen3-Coder-Next 🤖", "id": "Qwen/Qwen3-Coder-Next", "description": "Qwen3-Coder-Next - Latest powerful coder model via HuggingFace Router with Novita provider", "supports_images": False},
|
| 105 |
{"name": "Kimi-K2.5 🧠", "id": "moonshotai/Kimi-K2.5", "description": "Kimi-K2.5 - New powerful reasoning model via HuggingFace Router with Novita provider", "supports_images": True},
|
| 106 |
{"name": "GLM-4.7-Flash ⚡", "id": "zai-org/GLM-4.7-Flash", "description": "GLM-4.7-Flash - Ultra-fast GLM model via HuggingFace Router with Novita provider", "supports_images": False},
|
|
@@ -198,7 +199,7 @@ async def startup_event():
|
|
| 198 |
class CodeGenerationRequest(BaseModel):
|
| 199 |
query: str
|
| 200 |
language: str = "html"
|
| 201 |
- model_id: str = "MiniMaxAI/MiniMax-M2.1"  <!-- removed line truncated in extraction; old default inferred from backend_models.py old-side check -->
|
| 202 |
provider: str = "auto"
|
| 203 |
history: List[List[str]] = []
|
| 204 |
agent_mode: bool = False
|
|
|
|
| 100 |
|
| 101 |
# Define models and languages here to avoid importing Gradio UI
|
| 102 |
AVAILABLE_MODELS = [
|
| 103 |
+
{"name": "MiniMax-M2.5 🤖", "id": "MiniMaxAI/MiniMax-M2.5", "description": "MiniMax-M2.5 - Latest powerful coder model via HuggingFace Router with fastest provider (Default)", "supports_images": False},
|
| 104 |
+
{"name": "GLM-5 🧠", "id": "zai-org/GLM-5", "description": "GLM-5 - New powerful reasoning model via HuggingFace Router", "supports_images": False},
|
| 105 |
{"name": "Qwen3-Coder-Next 🤖", "id": "Qwen/Qwen3-Coder-Next", "description": "Qwen3-Coder-Next - Latest powerful coder model via HuggingFace Router with Novita provider", "supports_images": False},
|
| 106 |
{"name": "Kimi-K2.5 🧠", "id": "moonshotai/Kimi-K2.5", "description": "Kimi-K2.5 - New powerful reasoning model via HuggingFace Router with Novita provider", "supports_images": True},
|
| 107 |
{"name": "GLM-4.7-Flash ⚡", "id": "zai-org/GLM-4.7-Flash", "description": "GLM-4.7-Flash - Ultra-fast GLM model via HuggingFace Router with Novita provider", "supports_images": False},
|
|
|
|
| 199 |
class CodeGenerationRequest(BaseModel):
|
| 200 |
query: str
|
| 201 |
language: str = "html"
|
| 202 |
+
model_id: str = "MiniMaxAI/MiniMax-M2.5"
|
| 203 |
provider: str = "auto"
|
| 204 |
history: List[List[str]] = []
|
| 205 |
agent_mode: bool = False
|
backend_models.py
CHANGED
|
@@ -13,7 +13,7 @@ def get_inference_client(model_id: str, provider: str = "auto"):
|
|
| 13 |
|
| 14 |
Returns OpenAI-compatible client for all models or raises error if not configured.
|
| 15 |
"""
|
| 16 |
-
if model_id == "MiniMaxAI/MiniMax-M2" or model_id == "MiniMaxAI/MiniMax-M2.1":
|
| 17 |
# Use HuggingFace Router with Novita provider for MiniMax M2 models
|
| 18 |
return OpenAI(
|
| 19 |
base_url="https://router.huggingface.co/v1",
|
|
@@ -87,6 +87,10 @@ def get_real_model_id(model_id: str) -> str:
|
|
| 87 |
# MiniMax M2 and M2.1 need Novita provider suffix
|
| 88 |
return f"{model_id}:novita"
|
| 89 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 90 |
elif model_id == "moonshotai/Kimi-K2-Thinking":
|
| 91 |
# Kimi K2 Thinking needs Together AI provider
|
| 92 |
return "moonshotai/Kimi-K2-Thinking:together"
|
|
|
|
| 13 |
|
| 14 |
Returns OpenAI-compatible client for all models or raises error if not configured.
|
| 15 |
"""
|
| 16 |
+
if model_id == "MiniMaxAI/MiniMax-M2" or model_id == "MiniMaxAI/MiniMax-M2.1" or model_id == "MiniMaxAI/MiniMax-M2.5":
|
| 17 |
# Use HuggingFace Router with Novita provider for MiniMax M2 models
|
| 18 |
return OpenAI(
|
| 19 |
base_url="https://router.huggingface.co/v1",
|
|
|
|
| 87 |
# MiniMax M2 and M2.1 need Novita provider suffix
|
| 88 |
return f"{model_id}:novita"
|
| 89 |
|
| 90 |
+
elif model_id == "MiniMaxAI/MiniMax-M2.5":
|
| 91 |
+
# MiniMax M2.5 needs fastest provider suffix
|
| 92 |
+
return "MiniMaxAI/MiniMax-M2.5:fastest"
|
| 93 |
+
|
| 94 |
elif model_id == "moonshotai/Kimi-K2-Thinking":
|
| 95 |
# Kimi K2 Thinking needs Together AI provider
|
| 96 |
return "moonshotai/Kimi-K2-Thinking:together"
|
frontend/src/app/page.tsx
CHANGED
|
@@ -17,7 +17,7 @@ export default function Home() {
|
|
| 17 |
|
| 18 |
const [generatedCode, setGeneratedCode] = useState('');
|
| 19 |
const [selectedLanguage, setSelectedLanguage] = useState<Language>('html');
|
| 20 |
- const [selectedModel, setSelectedModel] = useState('MiniMaxAI/MiniMax-M2.1');  <!-- removed line truncated in extraction; old default inferred -->
|
| 21 |
const [models, setModels] = useState<Model[]>([]);
|
| 22 |
const [isGenerating, setIsGenerating] = useState(false);
|
| 23 |
const [isAuthenticated, setIsAuthenticated] = useState(false);
|
|
|
|
| 17 |
|
| 18 |
const [generatedCode, setGeneratedCode] = useState('');
|
| 19 |
const [selectedLanguage, setSelectedLanguage] = useState<Language>('html');
|
| 20 |
+
const [selectedModel, setSelectedModel] = useState('MiniMaxAI/MiniMax-M2.5');
|
| 21 |
const [models, setModels] = useState<Model[]>([]);
|
| 22 |
const [isGenerating, setIsGenerating] = useState(false);
|
| 23 |
const [isAuthenticated, setIsAuthenticated] = useState(false);
|
frontend/src/components/ControlPanel.tsx
CHANGED
|
@@ -197,7 +197,7 @@ export default function ControlPanel({
|
|
| 197 |
>
|
| 198 |
<div className="flex items-center justify-between gap-2">
|
| 199 |
<span className="text-sm text-[#f5f5f7]">{model.name}</span>
|
| 200 |
- {['MiniMaxAI/MiniMax-M2.1'].includes(model.id) && (  <!-- removed line truncated in extraction; reconstructed to mirror the added line at new 200 -->
|
| 201 |
<span className="px-1.5 py-0.5 bg-gradient-to-r from-purple-500 to-pink-500 text-white text-[9px] font-bold rounded uppercase flex-shrink-0">
|
| 202 |
NEW
|
| 203 |
</span>
|
|
|
|
| 197 |
>
|
| 198 |
<div className="flex items-center justify-between gap-2">
|
| 199 |
<span className="text-sm text-[#f5f5f7]">{model.name}</span>
|
| 200 |
+
{['MiniMaxAI/MiniMax-M2.5'].includes(model.id) && (
|
| 201 |
<span className="px-1.5 py-0.5 bg-gradient-to-r from-purple-500 to-pink-500 text-white text-[9px] font-bold rounded uppercase flex-shrink-0">
|
| 202 |
NEW
|
| 203 |
</span>
|
frontend/src/components/LandingPage.tsx
CHANGED
|
@@ -31,7 +31,7 @@ export default function LandingPage({
|
|
| 31 |
onImport,
|
| 32 |
isAuthenticated,
|
| 33 |
initialLanguage = 'html',
|
| 34 |
- initialModel = 'MiniMaxAI/MiniMax-M2.1',  <!-- removed line truncated in extraction; old default inferred -->
|
| 35 |
onAuthChange,
|
| 36 |
setPendingPR,
|
| 37 |
pendingPRRef
|
|
@@ -514,8 +514,8 @@ ${isGradio ? '\n\nIMPORTANT: Only output app.py with the redesigned UI (themes,
|
|
| 514 |
if (onStart) {
|
| 515 |
// Pass duplicated space ID so auto-deploy updates it
|
| 516 |
console.log('[Redesign] Calling onStart with duplicated repo ID:', duplicatedRepoId);
|
| 517 |
- console.log('[Redesign] Using MiniMaxAI/MiniMax-M2.1 for redesign');  <!-- removed lines truncated in extraction; reconstructed to mirror the added lines at new 517-518 -->
- onStart(redesignPrompt, result.language || 'html', 'MiniMaxAI/MiniMax-M2.1', undefined, duplicatedRepoId);
|
| 519 |
}
|
| 520 |
}, 100);
|
| 521 |
|
|
@@ -559,8 +559,8 @@ Note: After generating the redesign, I will create a Pull Request on the origina
|
|
| 559 |
|
| 560 |
if (onStart) {
|
| 561 |
console.log('[Redesign] Will create PR - not passing repo ID');
|
| 562 |
- console.log('[Redesign] Using MiniMaxAI/MiniMax-M2.1 for redesign');  <!-- removed lines truncated in extraction; reconstructed to mirror the added lines at new 562-563 -->
- onStart(redesignPrompt, result.language || 'html', 'MiniMaxAI/MiniMax-M2.1', undefined, repoId, true); // Pass true for shouldCreatePR
|
| 564 |
}
|
| 565 |
|
| 566 |
console.log('[Redesign] Will create PR after code generation completes');
|
|
@@ -834,7 +834,7 @@ Note: After generating the redesign, I will create a Pull Request on the origina
|
|
| 834 |
>
|
| 835 |
<div className="flex items-center justify-between gap-2">
|
| 836 |
<span className="text-xs font-medium text-[#f5f5f7]">{model.name}</span>
|
| 837 |
- {model.id === 'MiniMaxAI/MiniMax-M2.1' && (  <!-- removed line truncated in extraction; reconstructed to mirror the added line at new 837 -->
|
| 838 |
<span className="px-1.5 py-0.5 bg-gradient-to-r from-purple-500 to-pink-500 text-white text-[9px] font-bold rounded uppercase">
|
| 839 |
NEW
|
| 840 |
</span>
|
|
|
|
| 31 |
onImport,
|
| 32 |
isAuthenticated,
|
| 33 |
initialLanguage = 'html',
|
| 34 |
+
initialModel = 'MiniMaxAI/MiniMax-M2.5',
|
| 35 |
onAuthChange,
|
| 36 |
setPendingPR,
|
| 37 |
pendingPRRef
|
|
|
|
| 514 |
if (onStart) {
|
| 515 |
// Pass duplicated space ID so auto-deploy updates it
|
| 516 |
console.log('[Redesign] Calling onStart with duplicated repo ID:', duplicatedRepoId);
|
| 517 |
+
console.log('[Redesign] Using MiniMaxAI/MiniMax-M2.5 for redesign');
|
| 518 |
+
onStart(redesignPrompt, result.language || 'html', 'MiniMaxAI/MiniMax-M2.5', undefined, duplicatedRepoId);
|
| 519 |
}
|
| 520 |
}, 100);
|
| 521 |
|
|
|
|
| 559 |
|
| 560 |
if (onStart) {
|
| 561 |
console.log('[Redesign] Will create PR - not passing repo ID');
|
| 562 |
+
console.log('[Redesign] Using MiniMaxAI/MiniMax-M2.5 for redesign');
|
| 563 |
+
onStart(redesignPrompt, result.language || 'html', 'MiniMaxAI/MiniMax-M2.5', undefined, repoId, true); // Pass true for shouldCreatePR
|
| 564 |
}
|
| 565 |
|
| 566 |
console.log('[Redesign] Will create PR after code generation completes');
|
|
|
|
| 834 |
>
|
| 835 |
<div className="flex items-center justify-between gap-2">
|
| 836 |
<span className="text-xs font-medium text-[#f5f5f7]">{model.name}</span>
|
| 837 |
+
{model.id === 'MiniMaxAI/MiniMax-M2.5' && (
|
| 838 |
<span className="px-1.5 py-0.5 bg-gradient-to-r from-purple-500 to-pink-500 text-white text-[9px] font-bold rounded uppercase">
|
| 839 |
NEW
|
| 840 |
</span>
|