import { NextRequest, NextResponse } from 'next/server'
import { GoogleGenAI } from '@google/genai'

export async function POST(request: NextRequest) {
  try {
    const { message, history } = await request.json()

    // Validate input up front so malformed requests get a 400 instead of a generic 500
    if (typeof message !== 'string' || message.trim() === '') {
      return NextResponse.json(
        { error: 'Missing or invalid "message"' },
        { status: 400 }
      )
    }

    const apiKey = process.env.GEMINI_API_KEY

    if (!apiKey) {
      return NextResponse.json(
        { error: 'Gemini API key not configured' },
        { status: 500 }
      )
    }

    const ai = new GoogleGenAI({ apiKey })

    // Convert chat history to the Gemini request format; the SDK expects
    // assistant turns under the role 'model'
    const contents = Array.isArray(history)
      ? history.map((msg: { role: string; content: string }) => ({
          role: msg.role === 'assistant' ? 'model' : 'user',
          parts: [{ text: msg.content }]
        }))
      : []

    // Add current message
    contents.push({
      role: 'user',
      parts: [{ text: message }]
    })

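    // Stream the model output back as Server-Sent Events: each chunk is
    // serialized into a `data: {...}\n\n` frame carrying { text, thought }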
    const encoder = new TextEncoder()
    const stream = new ReadableStream({
      async start(controller) {
        try {
          const result = await ai.models.generateContentStream({
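            // 'gemini-flash-latest' is an alias that tracks the newest Flash release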
            model: 'gemini-flash-latest',
            contents,
            config: {
              thinkingConfig: {
                includeThoughts: true,
                thinkingBudget: -1  // Dynamic thinking: model adjusts based on request complexity
              }
            }
          })

          for await (const chunk of result) {
            // Process each part to separate thoughts from actual response
            let text = ''
            let thought = ''

            if (chunk.candidates?.[0]?.content?.parts) {
              for (const part of chunk.candidates[0].content.parts) {
                if (!part.text) continue

                // @ts-ignore - `thought` is set on parts when thinking mode is enabled
                if (part.thought) {
                  // part.thought === true: part.text is the thinking summary
                  thought += part.text
                } else {
                  // part.thought is false/undefined: part.text is the answer
                  text += part.text
                }
              }
            }

            const data = JSON.stringify({ text, thought })
            controller.enqueue(encoder.encode(`data: ${data}\n\n`))
          }
          controller.close()
        } catch (error) {
          console.error('Streaming error:', error)
          controller.error(error)
        }
      }
    })

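    // SSE headers: disable caching and keep the connection alive so
    // intermediaries don't buffer or terminate the stream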
    return new NextResponse(stream, {
      headers: {
        'Content-Type': 'text/event-stream; charset=utf-8',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
      },
    })

  } catch (error) {
    console.error('Error in Gemini chat API:', error)
    return NextResponse.json(
      { error: 'Failed to process request' },
      { status: 500 }
    )
  }
}
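
/*
  A minimal client-side consumption sketch (illustrative only, not part of this
  route). It assumes the route is mounted at /api/chat; adjust the path and the
  handling of { text, thought } to fit your app.

  const res = await fetch('/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ message: 'Hello', history: [] }),
  })
  const reader = res.body!.getReader()
  const decoder = new TextDecoder()
  let buffer = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })
    const frames = buffer.split('\n\n')   // SSE frames end with a blank line
    buffer = frames.pop() ?? ''           // keep any partial frame for later
    for (const frame of frames) {
      if (!frame.startsWith('data: ')) continue
      const { text, thought } = JSON.parse(frame.slice(6))
      if (thought) console.log('[thinking]', thought)
      if (text) console.log(text)
    }
  }
*/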