// Next.js API route: rewrites a user's prompt via Google Gemini or a Hugging Face inference provider.
| import { NextRequest, NextResponse } from "next/server"; | |
| import { GoogleGenAI } from "@google/genai"; | |
| import { InferenceClient } from "@huggingface/inference"; | |
| export async function POST(req: NextRequest) { | |
| try { | |
| const { prompt, model, provider, enhancedSettings } = await req.json(); | |
| if (!prompt || typeof prompt !== 'string') { | |
| return NextResponse.json({ ok: false, message: 'Missing prompt' }, { status: 400 }); | |
| } | |
| const system = `You are a senior product designer and prompt engineer. Rewrite the user's prompt to make it clearer, more actionable and specific for an AI that builds complete web applications (frontend-first with modern, responsive, accessible UI; optionally include backend notes if requested). Always preserve the original intent but add concrete details, constraints, and acceptance criteria. If enhanced settings are provided (colors, theme, etc.), weave them naturally into the prompt. Return ONLY the rewritten prompt, nothing else.`; | |
| const userParts: string[] = [ | |
| `Original prompt: ${prompt}` | |
| ]; | |
| if (enhancedSettings?.isActive) { | |
| userParts.push( | |
| `Enhanced settings: primary=${enhancedSettings?.primaryColor || 'auto'}, secondary=${enhancedSettings?.secondaryColor || 'auto'}, theme=${enhancedSettings?.theme || 'auto'}` | |
| ); | |
| } | |
| const selectedProvider = (provider || '').toLowerCase(); | |
| let text = ''; | |
| if (selectedProvider === 'google' || (model || '').toLowerCase().startsWith('gemini-')) { | |
| const apiKey = process.env.GEMINI_API_KEY; | |
| if (!apiKey) return NextResponse.json({ ok: false, message: 'Missing GEMINI_API_KEY' }, { status: 500 }); | |
| const ai = new GoogleGenAI({ apiKey }); | |
| const res = await ai.models.generateContent({ | |
| model: model || 'gemini-2.5-flash', | |
| contents: [ | |
| { role: 'user', parts: [{ text: system }] }, | |
| { role: 'user', parts: [{ text: userParts.join('\n') }] }, | |
| ], | |
| config: { maxOutputTokens: 1024 }, | |
| } as any); | |
| text = ((res as any)?.response?.text && (res as any).response.text()) | |
| || (res as any)?.text | |
| || ((res as any)?.candidates?.[0]?.content?.parts?.map((p: any) => p?.text || "").join("") || ""); | |
| if (!text || !String(text).trim()) { | |
| const res2 = await ai.models.generateContent({ | |
| model: model || 'gemini-2.5-flash', | |
| contents: [ { role: 'user', parts: [{ text: `${system}\n\n${userParts.join('\n')}` }] } ], | |
| config: { maxOutputTokens: 1024 }, | |
| } as any); | |
| text = ((res2 as any)?.response?.text && (res2 as any).response.text()) | |
| || (res2 as any)?.text | |
| || ((res2 as any)?.candidates?.[0]?.content?.parts?.map((p: any) => p?.text || "").join("") || ""); | |
| } | |
| } else { | |
| const token = process.env.HF_TOKEN || process.env.DEFAULT_HF_TOKEN; | |
| const client = new InferenceClient(token); | |
| const res = await client.chatCompletion({ | |
| model: model, | |
| provider: provider as any, | |
| messages: [ | |
| { role: 'system', content: system }, | |
| { role: 'user', content: userParts.join('\n') }, | |
| ], | |
| }); | |
| text = res.choices?.[0]?.message?.content || ''; | |
| } | |
| if (!text.trim()) { | |
| return NextResponse.json({ ok: false, message: 'No content returned' }, { status: 500 }); | |
| } | |
| return NextResponse.json({ ok: true, prompt: text }); | |
| } catch (e: any) { | |
| return NextResponse.json({ ok: false, message: e?.message || 'Internal error' }, { status: 500 }); | |
| } | |
| } | |