kalhdrawi committed
Commit 9500400 · Parent: b0e750c

Reupload OmniDev clean version

app/api/augment/route.ts CHANGED
@@ -65,7 +65,20 @@ export async function POST(req: NextRequest) {
         ],
         config: { maxOutputTokens: 4096 },
       } as any);
-      text = (res as any)?.candidates?.[0]?.content?.parts?.[0]?.text || "";
+      text = ((res as any)?.response?.text && (res as any).response.text())
+        || (res as any)?.text
+        || ((res as any)?.candidates?.[0]?.content?.parts?.map((p: any) => p?.text || "").join("") || "");
+      // Fallback: single user message with combined system + prompt
+      if (!text || !String(text).trim()) {
+        const res2 = await ai.models.generateContent({
+          model: model || "gemini-2.5-flash",
+          contents: [ { role: 'user', parts: [{ text: `${SYS}\n\n${userPrompt}` }] } ],
+          config: { maxOutputTokens: 4096 },
+        } as any);
+        text = ((res2 as any)?.response?.text && (res2 as any).response.text())
+          || (res2 as any)?.text
+          || ((res2 as any)?.candidates?.[0]?.content?.parts?.map((p: any) => p?.text || "").join("") || "");
+      }
     } else {
       const token = process.env.HF_TOKEN || process.env.DEFAULT_HF_TOKEN;
       const client = new InferenceClient(token);
@@ -101,4 +114,3 @@ export async function POST(req: NextRequest) {
     return NextResponse.json({ ok: false, message: e?.message || "Internal error" } as AugmentResponse, { status: 500 });
   }
 }
-
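The three-way extraction added here is repeated verbatim in the enhance route below. A minimal sketch of how that logic could be factored into a shared helper, assuming a hypothetical lib/gemini.ts module and only the response shapes the routes already probe (the commit itself keeps the logic inline in each route):

// Hypothetical helper, not part of this commit: consolidates the three
// Gemini response shapes the routes probe when reading model output.
export function extractGeminiText(res: any): string {
  // Some SDK responses expose a text() accessor on `response`.
  if (typeof res?.response?.text === "function") return res.response.text() || "";
  // Other shapes carry the text directly on the response object.
  if (typeof res?.text === "string") return res.text;
  // Fall back to concatenating every part of the first candidate.
  return res?.candidates?.[0]?.content?.parts?.map((p: any) => p?.text || "").join("") || "";
}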
 
app/api/enhance/route.ts CHANGED
@@ -35,7 +35,19 @@ export async function POST(req: NextRequest) {
         ],
         config: { maxOutputTokens: 1024 },
       } as any);
-      text = (res as any)?.candidates?.[0]?.content?.parts?.[0]?.text || '';
+      text = ((res as any)?.response?.text && (res as any).response.text())
+        || (res as any)?.text
+        || ((res as any)?.candidates?.[0]?.content?.parts?.map((p: any) => p?.text || "").join("") || "");
+      if (!text || !String(text).trim()) {
+        const res2 = await ai.models.generateContent({
+          model: model || 'gemini-2.5-flash',
+          contents: [ { role: 'user', parts: [{ text: `${system}\n\n${userParts.join('\n')}` }] } ],
+          config: { maxOutputTokens: 1024 },
+        } as any);
+        text = ((res2 as any)?.response?.text && (res2 as any).response.text())
+          || (res2 as any)?.text
+          || ((res2 as any)?.candidates?.[0]?.content?.parts?.map((p: any) => p?.text || "").join("") || "");
+      }
     } else {
       const token = process.env.HF_TOKEN || process.env.DEFAULT_HF_TOKEN;
       const client = new InferenceClient(token);
@@ -58,4 +70,3 @@ export async function POST(req: NextRequest) {
     return NextResponse.json({ ok: false, message: e?.message || 'Internal error' }, { status: 500 });
   }
 }
-
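The second half of each hunk is the same retry pattern: if the first reply comes back empty, re-send the system text and user prompt merged into a single user turn. A sketch of that step as a reusable function, assuming the hypothetical extractGeminiText helper above and the same ai.models.generateContent client the routes use (names here are illustrative):

// Hypothetical wrapper, not part of this commit: retries once with the
// system text and prompt collapsed into a single user message, mirroring
// the fallback both routes add.
async function retryIfEmpty(
  ai: any,
  text: string,
  model: string,
  combinedPrompt: string,
  maxOutputTokens: number,
): Promise<string> {
  if (text && text.trim()) return text; // first attempt already produced output
  const res2 = await ai.models.generateContent({
    model,
    contents: [{ role: "user", parts: [{ text: combinedPrompt }] }],
    config: { maxOutputTokens },
  } as any);
  return extractGeminiText(res2);
}

// Usage matching the augment route's fallback:
// text = await retryIfEmpty(ai, text, model || "gemini-2.5-flash", `${SYS}\n\n${userPrompt}`, 4096);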