devme committed on
Commit
4c71936
·
verified ·
1 Parent(s): fc2a0d0

Upload 12 files

Browse files
Dockerfile CHANGED
@@ -1,16 +1,21 @@
1
- FROM alpine:latest
2
-
3
- # 安装必要的运行时依赖(如果需要)
4
- RUN apk add --no-cache ca-certificates
5
-
6
- WORKDIR /app
7
-
8
- # 复制可执行文件并设置权限
9
- COPY main ./main
10
- RUN chmod +x main
11
-
12
- # 创建非 root 用户(安全最佳实践)
13
- RUN adduser -D -s /bin/sh main
14
- USER main
15
-
16
- CMD ["./main"]
 
 
 
 
 
 
1
# Runtime image for the Node.js API proxy (server.js, see package.json).
FROM node:lts-alpine

# Install PM2 globally.
# NOTE(review): CMD below runs `npm start` → plain `node server.js`; PM2 is
# never invoked in this image — confirm whether it can be removed.
RUN npm install -g pm2

WORKDIR /app

# Copy manifests first so the dependency layer is cached across code changes.
COPY package*.json ./

# Install dependencies.
RUN npm install

# Copy application code.
COPY . .

# Security fix: the previous `chmod 777 /app` made the tree world-writable
# and the container ran as root. Give the files to the unprivileged `node`
# user shipped with the base image and drop root.
RUN chown -R node:node /app
USER node

EXPOSE 3000
CMD ["npm", "start"]
config.js ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Static application configuration plus typed accessor helpers.
const CONFIG = {
  // Listen port; the PORT env var (a string) takes precedence — see getPort().
  port: process.env.PORT || 3000,

  // Upstream endpoints, addressed by `name` through getEndpointByType().
  endpoint: [
    {
      name: 'openai',
      base_url: 'https://app.factory.ai/api/llm/o/v1/responses'
    },
    {
      name: 'anthropic',
      base_url: 'https://app.factory.ai/api/llm/a/v1/messages'
    },
    {
      name: 'common',
      base_url: 'https://app.factory.ai/api/llm/o/v1/chat/completions'
    }
  ],

  // Models advertised by /v1/models. `type` selects the endpoint above;
  // `reasoning` ('auto' | 'off' | 'low' | 'medium' | 'high', optional)
  // drives thinking/reasoning handling — see getModelReasoning().
  models: [
    { id: 'claude-opus-4-1-20250805', type: 'anthropic', reasoning: 'auto' },
    { id: 'claude-haiku-4-5-20251001', type: 'anthropic', reasoning: 'auto' },
    { id: 'claude-sonnet-4-5-20250929', type: 'anthropic', reasoning: 'auto' },
    { id: 'gpt-5-2025-08-07', type: 'openai', reasoning: 'auto' },
    { id: 'gpt-5-codex', type: 'openai', reasoning: 'off' },
    { id: 'glm-4.6', type: 'common' }
  ],

  // Strict comparison: the original `process.env.DEV_MODE=='true' || false`
  // used coercing `==` and a redundant `|| false`.
  dev_mode: process.env.DEV_MODE === 'true',
  user_agent: 'factory-cli/0.22.14',
  system_prompt: 'You are Droid, an AI software engineering agent built by Factory.\n\n'
}

/** Load the configuration. Defined statically in code, so this is a no-op read. */
export function loadConfig() {
  return CONFIG
}

/** @returns {object} the shared configuration object. */
export function getConfig() {
  return CONFIG
}

/** Look up a model entry by id; undefined when unknown. */
export function getModelById(modelId) {
  return CONFIG.models.find(m => m.id === modelId)
}

/** Look up an endpoint entry by name ('openai' | 'anthropic' | 'common'). */
export function getEndpointByType(type) {
  return CONFIG.endpoint.find(e => e.name === type)
}

/** True only when DEV_MODE=true was set in the environment. */
export function isDevMode() {
  return CONFIG.dev_mode === true
}

/** Listen port: PORT env var parsed base-10 when numeric, else the default. */
export function getPort() {
  // Explicit radix (original called parseInt without one); NaN/0 falls back.
  return Number.parseInt(process.env.PORT, 10) || CONFIG.port
}

/** System prompt injected into upstream requests ('' when unset). */
export function getSystemPrompt() {
  return CONFIG.system_prompt || ''
}

/**
 * Normalized reasoning level for a model.
 * @returns {'low'|'medium'|'high'|'auto'|null} null when the model is
 *   unknown, has no reasoning field, or the value is 'off'/unrecognized.
 */
export function getModelReasoning(modelId) {
  const model = getModelById(modelId)
  if (!model || !model.reasoning) {
    return null
  }
  const reasoningLevel = model.reasoning.toLowerCase()
  if (['low', 'medium', 'high', 'auto'].includes(reasoningLevel)) {
    return reasoningLevel
  }
  return null
}

/** User-Agent string presented to the upstream service. */
export function getUserAgent() {
  return CONFIG.user_agent
}

/** Outbound proxy URL from PROXY_URL, or null for direct connections. */
export function getProxyUrl() {
  return process.env.PROXY_URL || null
}
logger.js ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { isDevMode } from './config.js'
2
+
3
/**
 * Console logging helpers. INFO and ERROR lines always print; their attached
 * payloads — and everything at DEBUG/REQUEST/RESPONSE level — print only
 * while dev mode (config.isDevMode) is enabled.
 */

export function logInfo(message, data = null) {
  console.log(`[INFO] ${message}`)
  if (isDevMode() && data) {
    console.log(JSON.stringify(data, null, 2))
  }
}

export function logDebug(message, data = null) {
  if (!isDevMode()) return
  console.log(`[DEBUG] ${message}`)
  if (data) {
    console.log(JSON.stringify(data, null, 2))
  }
}

export function logError(message, error = null) {
  console.error(`[ERROR] ${message}`)
  if (isDevMode() && error) {
    console.error(error)
  }
}

export function logRequest(method, url, headers = null, body = null) {
  if (!isDevMode()) return
  console.log(`[REQUEST] ${method} ${url}`)
  if (headers) console.log('[HEADERS]', JSON.stringify(headers, null, 2))
  if (body) console.log('[BODY]', JSON.stringify(body, null, 2))
}

export function logResponse(status, headers = null, body = null) {
  if (!isDevMode()) return
  console.log(`[RESPONSE] ${status}`)
  if (headers) console.log('[HEADERS]', JSON.stringify(headers, null, 2))
  if (body) console.log('[BODY]', JSON.stringify(body, null, 2))
}
package.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "droid2api",
3
+ "version": "1.3.5",
4
+ "description": "OpenAI Compatible API Proxy",
5
+ "main": "server.js",
6
+ "type": "module",
7
+ "scripts": {
8
+ "start": "node server.js",
9
+ "dev": "node server.js"
10
+ },
11
+ "keywords": ["openai", "api", "proxy"],
12
+ "author": "",
13
+ "license": "MIT",
14
+ "dependencies": {
15
+ "express": "^4.18.2",
16
+ "https-proxy-agent": "^7.0.2",
17
+ "node-fetch": "^3.3.2"
18
+ }
19
+ }
proxy-manager.js ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { HttpsProxyAgent } from 'https-proxy-agent'
2
+ import { getProxyUrl } from './config.js'
3
+ import { logInfo, logError } from './logger.js'
4
+
5
// Module-level cache: one agent per configured proxy URL, rebuilt on change.
let cachedAgent = null
let cachedProxyUrl = null

/**
 * Resolve the outbound proxy agent for a request.
 *
 * Returns null for a direct connection (no PROXY_URL configured, or agent
 * construction failed), otherwise `{ agent, proxy: { url } }`. The agent is
 * cached and reused until the configured URL changes.
 */
export function getNextProxyAgent(targetUrl) {
  const proxyUrl = getProxyUrl()

  // No proxy configured → direct connection.
  if (!proxyUrl) {
    return null
  }

  // Invalidate the cached agent whenever the configured URL changes.
  if (cachedProxyUrl !== proxyUrl) {
    cachedProxyUrl = proxyUrl
    cachedAgent = null
  }

  // Build (and log) a new agent only on a cache miss.
  if (!cachedAgent) {
    try {
      cachedAgent = new HttpsProxyAgent(proxyUrl)
      logInfo(`使用代理 ${proxyUrl} 请求 ${targetUrl}`)
    } catch (error) {
      logError(`为 ${proxyUrl} 创建代理失败`, error)
      return null
    }
  }

  return { agent: cachedAgent, proxy: { url: proxyUrl } }
}
37
+
routes.js ADDED
@@ -0,0 +1,608 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import express from 'express'
import fetch from 'node-fetch'
import { getConfig, getModelById, getEndpointByType, getSystemPrompt, getModelReasoning } from './config.js'
import { logError, logInfo, logRequest, logResponse } from './logger.js'
import { transformToAnthropic, getAnthropicHeaders } from './transformers/request-anthropic.js'
import { transformToOpenAI, getOpenAIHeaders } from './transformers/request-openai.js'
import { transformToCommon, getCommonHeaders } from './transformers/request-common.js'
import { AnthropicResponseTransformer } from './transformers/response-anthropic.js'
import { OpenAIResponseTransformer } from './transformers/response-openai.js'
import { getNextProxyAgent } from './proxy-manager.js'
11
+
12
/**
 * Extract the caller's credential from the incoming request headers.
 * Accepts `authorization` (either casing) or `x-api-key`, returning the
 * first non-empty value found, or undefined when none is present.
 */
function getAuthHeader(req) {
  const { authorization, Authorization } = req.headers
  return authorization || Authorization || req.headers['x-api-key']
}
18
+
19
+ const router = express.Router();
20
+
21
/**
 * Convert a /v1/responses API result into /v1/chat/completions shape.
 * Non-streaming responses only. Concatenates all `output_text` blocks of
 * the first `message` output item into a single assistant message.
 * @throws {Error} when `resp` is not an object.
 */
function convertResponseToChatCompletion(resp) {
  if (!resp || typeof resp !== 'object') {
    throw new Error('Invalid response object');
  }

  const message = (resp.output || []).find((item) => item.type === 'message');
  const pieces = [];
  for (const block of message?.content || []) {
    if (block.type === 'output_text') pieces.push(block.text);
  }
  const text = pieces.join('');

  // resp_* ids become chatcmpl-* ids; synthesize one if the upstream omitted it.
  const completionId = resp.id
    ? resp.id.replace(/^resp_/, 'chatcmpl-')
    : `chatcmpl-${Date.now()}`;

  return {
    id: completionId,
    object: 'chat.completion',
    created: resp.created_at || Math.floor(Date.now() / 1000),
    model: resp.model || 'unknown-model',
    choices: [
      {
        index: 0,
        message: {
          role: message?.role || 'assistant',
          content: text || ''
        },
        finish_reason: resp.status === 'completed' ? 'stop' : 'unknown'
      }
    ],
    usage: {
      prompt_tokens: resp.usage?.input_tokens ?? 0,
      completion_tokens: resp.usage?.output_tokens ?? 0,
      total_tokens: resp.usage?.total_tokens ?? 0
    }
  };
}
58
+
59
// GET /v1/models — advertise the configured model catalog in OpenAI's
// list format; `owned_by` carries the endpoint type.
router.get('/v1/models', (req, res) => {
  try {
    const data = getConfig().models.map(({ id, type }) => ({
      id,
      object: 'model',
      created: Date.now(),
      owned_by: type,
      permission: [],
      root: id,
      parent: null
    }))

    res.json({ object: 'list', data })
  } catch (error) {
    logError('GET /v1/models', error)
    res.status(500).json({ error: 'Internal server error' })
  }
})
81
+
82
// Standard OpenAI chat-completions handler (with request/response format
// conversion). Flow: validate model → resolve endpoint → transform the
// request per endpoint type → forward via fetch (optionally proxied) →
// stream or JSON the response back, converting to OpenAI shape as needed.
async function handleChatCompletions(req, res) {
  try {
    const openaiRequest = req.body
    const modelId = openaiRequest.model

    if (!modelId) {
      return res.status(400).json({ error: '需要提供 model 参数' })
    }

    const model = getModelById(modelId)
    if (!model) {
      return res.status(404).json({ error: `未找到模型 ${modelId}` })
    }

    const endpoint = getEndpointByType(model.type)
    if (!endpoint) {
      return res.status(500).json({ error: `未找到端点类型 ${model.type}` })
    }

    // Authentication: Authorization header or x-api-key (see getAuthHeader).
    const authHeader = getAuthHeader(req)
    if (!authHeader) {
      return res.status(401).json({
        error: '未提供认证信息',
        message: '请在请求头中提供 Authorization 或 x-api-key'
      })
    }

    let transformedRequest
    let headers
    const clientHeaders = req.headers

    // Transform the OpenAI-shaped request into the upstream endpoint's format.
    if (model.type === 'anthropic') {
      transformedRequest = transformToAnthropic(openaiRequest)
      const isStreaming = openaiRequest.stream === true
      headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId)
    } else if (model.type === 'openai') {
      transformedRequest = transformToOpenAI(openaiRequest)
      headers = getOpenAIHeaders(authHeader, clientHeaders)
    } else if (model.type === 'common') {
      transformedRequest = transformToCommon(openaiRequest)
      headers = getCommonHeaders(authHeader, clientHeaders)
    } else {
      return res.status(500).json({ error: `未知的端点类型: ${model.type}` })
    }

    logRequest('POST', endpoint.base_url, headers, transformedRequest)

    const proxyAgentInfo = getNextProxyAgent(endpoint.base_url)
    const fetchOptions = {
      method: 'POST',
      headers,
      body: JSON.stringify(transformedRequest)
    }

    // Attach the proxy agent only when one is configured (null → direct).
    if (proxyAgentInfo?.agent) {
      fetchOptions.agent = proxyAgentInfo.agent
    }

    const response = await fetch(endpoint.base_url, fetchOptions)

    if (!response.ok) {
      const errorText = await response.text()
      logError(`端点错误: ${response.status}`, new Error(errorText))
      return res.status(response.status).json({
        error: `端点返回 ${response.status}`,
        details: errorText
      })
    }

    const isStreaming = transformedRequest.stream === true

    if (isStreaming) {
      res.setHeader('Content-Type', 'text/event-stream')
      res.setHeader('Cache-Control', 'no-cache')
      res.setHeader('Connection', 'keep-alive')

      // 'common' endpoints already speak OpenAI SSE: pipe through untransformed.
      if (model.type === 'common') {
        try {
          for await (const chunk of response.body) {
            res.write(chunk)
          }
          res.end()
        } catch (streamError) {
          logError('流错误', streamError)
          res.end()
        }
      } else {
        // anthropic/openai endpoints need their SSE rewritten to OpenAI chunks.
        let transformer
        if (model.type === 'anthropic') {
          transformer = new AnthropicResponseTransformer(modelId, `chatcmpl-${Date.now()}`)
        } else if (model.type === 'openai') {
          transformer = new OpenAIResponseTransformer(modelId, `chatcmpl-${Date.now()}`)
        }

        try {
          for await (const chunk of transformer.transformStream(response.body)) {
            res.write(chunk)
          }
          res.end()
        } catch (streamError) {
          logError('流错误', streamError)
          res.end()
        }
      }
    } else {
      const data = await response.json()
      if (model.type === 'openai') {
        // /v1/responses payloads are converted to chat.completion shape;
        // on conversion failure the raw upstream body is returned as-is.
        try {
          const converted = convertResponseToChatCompletion(data)
          logResponse(200, null, converted)
          res.json(converted)
        } catch (e) {
          logResponse(200, null, data)
          res.json(data)
        }
      } else {
        logResponse(200, null, data)
        res.json(data)
      }
    }

  } catch (error) {
    logError('/v1/chat/completions', error)
    res.status(500).json({
      error: '内部服务器错误',
      message: error.message
    })
  }
}
216
+
217
// Pass-through handler for OpenAI /v1/responses requests (no format
// conversion): injects the configured system prompt into `instructions`,
// normalizes the reasoning field per model config, and forwards.
async function handleDirectResponses(req, res) {
  try {
    const openaiRequest = req.body
    const modelId = openaiRequest.model

    if (!modelId) {
      return res.status(400).json({ error: '需要提供 model 参数' })
    }

    const model = getModelById(modelId)
    if (!model) {
      return res.status(404).json({ error: `未找到模型 ${modelId}` })
    }

    // This route only serves 'openai'-type endpoints.
    if (model.type !== 'openai') {
      return res.status(400).json({
        error: '无效的端点类型',
        message: `/v1/responses 接口只支持 openai 类型端点,当前模型 ${modelId} 是 ${model.type} 类型`
      })
    }

    const endpoint = getEndpointByType(model.type)
    if (!endpoint) {
      return res.status(500).json({ error: `未找到端点类型 ${model.type}` })
    }

    // Authentication: Authorization header or x-api-key.
    const authHeader = getAuthHeader(req);
    if (!authHeader) {
      return res.status(401).json({
        error: '未提供认证信息',
        message: '请在请求头中提供 Authorization 或 x-api-key'
      });
    }

    // Bare x-api-key values are promoted to Bearer form for the upstream.
    const finalAuthHeader = authHeader.startsWith('Bearer ')
      ? authHeader
      : `Bearer ${authHeader}`;

    const clientHeaders = req.headers;

    const headers = getOpenAIHeaders(finalAuthHeader, clientHeaders);

    // Inject the configured system prompt into the `instructions` field.
    const systemPrompt = getSystemPrompt();
    const modifiedRequest = { ...openaiRequest };
    if (systemPrompt) {
      // Prepend to any caller-provided instructions; otherwise set outright.
      if (modifiedRequest.instructions) {
        modifiedRequest.instructions = systemPrompt + modifiedRequest.instructions;
      } else {
        modifiedRequest.instructions = systemPrompt;
      }
    }

    // Reasoning handling per model config (see getModelReasoning).
    const reasoningLevel = getModelReasoning(modelId);
    if (reasoningLevel === 'auto') {
      // 'auto': leave the caller's reasoning field exactly as received.
    } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
      // Explicit level: override with the configured effort.
      modifiedRequest.reasoning = {
        effort: reasoningLevel,
        summary: 'auto'
      };
    } else {
      // 'off' or invalid: strip any caller-provided reasoning field.
      delete modifiedRequest.reasoning;
    }

    logRequest('POST', endpoint.base_url, headers, modifiedRequest)

    const proxyAgentInfo = getNextProxyAgent(endpoint.base_url)
    const fetchOptions = {
      method: 'POST',
      headers,
      body: JSON.stringify(modifiedRequest)
    }

    if (proxyAgentInfo?.agent) {
      fetchOptions.agent = proxyAgentInfo.agent
    }

    const response = await fetch(endpoint.base_url, fetchOptions)

    if (!response.ok) {
      const errorText = await response.text()
      logError(`端点错误: ${response.status}`, new Error(errorText))
      return res.status(response.status).json({
        error: `端点返回 ${response.status}`,
        details: errorText
      })
    }

    const isStreaming = openaiRequest.stream === true

    if (isStreaming) {
      // Raw SSE pass-through — no transformation of upstream chunks.
      res.setHeader('Content-Type', 'text/event-stream')
      res.setHeader('Cache-Control', 'no-cache')
      res.setHeader('Connection', 'keep-alive')

      try {
        for await (const chunk of response.body) {
          res.write(chunk)
        }
        res.end()
      } catch (streamError) {
        logError('流错误', streamError)
        res.end()
      }
    } else {
      const data = await response.json()
      logResponse(200, null, data)
      res.json(data)
    }

  } catch (error) {
    logError('/v1/responses', error)
    res.status(500).json({
      error: '内部服务器错误',
      message: error.message
    })
  }
}
346
+
347
// Pass-through handler for Anthropic /v1/messages requests (no format
// conversion): rewrites the Claude Code marker in system text, injects the
// configured system prompt, normalizes the thinking field, and forwards.
// NOTE(review): this function calls logInfo, but this file's logger.js
// import list does not include logInfo — that is a ReferenceError at
// runtime; add logInfo to the import.
async function handleDirectMessages(req, res) {
  logInfo('POST /v1/messages');

  try {
    const anthropicRequest = req.body;
    const modelId = anthropicRequest.model;

    if (!modelId) {
      return res.status(400).json({ error: '需要提供 model 参数' });
    }

    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `未找到模型 ${modelId}` });
    }

    // This route only serves 'anthropic'-type endpoints.
    if (model.type !== 'anthropic') {
      return res.status(400).json({
        error: '无效的端点类型',
        message: `/v1/messages 接口只支持 anthropic 类型端点,当前模型 ${modelId} 是 ${model.type} 类型`
      });
    }

    const endpoint = getEndpointByType(model.type);
    if (!endpoint) {
      return res.status(500).json({ error: `未找到端点类型 ${model.type}` });
    }

    logInfo(`直接转发到 ${model.type} 端点: ${endpoint.base_url}`);

    // Authentication: Authorization header or x-api-key.
    const authHeader = getAuthHeader(req);
    if (!authHeader) {
      return res.status(401).json({
        error: '未提供认证信息',
        message: '请在请求头中提供 Authorization 或 x-api-key'
      });
    }

    // Bare x-api-key values are promoted to Bearer form for the upstream.
    const finalAuthHeader = authHeader.startsWith('Bearer ')
      ? authHeader
      : `Bearer ${authHeader}`;

    const clientHeaders = req.headers;

    const isStreaming = anthropicRequest.stream === true;
    const headers = getAnthropicHeaders(finalAuthHeader, clientHeaders, isStreaming, modelId);

    // Inject the configured system prompt into the `system` field.
    const systemPrompt = getSystemPrompt();
    const modifiedRequest = { ...anthropicRequest };

    // Rewrite the Claude Code identity string in caller-supplied system text.
    // NOTE(review): the spread above is shallow, so msg.text is mutated on
    // the objects inside req.body.system as well — confirm that is intended.
    if (modifiedRequest.system && Array.isArray(modifiedRequest.system)) {
      for (const msg of modifiedRequest.system) {
        if (msg.type === 'text') {
          msg.text = msg.text.replace("You are Claude Code, Anthropic's official CLI for Claude.", 'you are bot.');
        }
      }
    }

    if (systemPrompt) {
      if (modifiedRequest.system && Array.isArray(modifiedRequest.system)) {
        // Existing system array: our prompt goes first.
        modifiedRequest.system = [
          { type: 'text', text: systemPrompt },
          ...modifiedRequest.system
        ];
      } else {
        // No system array yet: create one holding only our prompt.
        modifiedRequest.system = [
          { type: 'text', text: systemPrompt }
        ];
      }
    }

    // Thinking handling per model config (see getModelReasoning).
    const reasoningLevel = getModelReasoning(modelId);
    if (reasoningLevel === 'auto') {
      // 'auto': leave the caller's thinking field exactly as received.
    } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
      // Explicit level: override with a fixed token budget per level.
      const budgetTokens = {
        'low': 4096,
        'medium': 12288,
        'high': 24576
      };

      modifiedRequest.thinking = {
        type: 'enabled',
        budget_tokens: budgetTokens[reasoningLevel]
      };
    } else {
      // 'off' or invalid: strip any caller-provided thinking field.
      delete modifiedRequest.thinking;
    }

    logRequest('POST', endpoint.base_url, headers, modifiedRequest);
    // console.log(modifiedRequest);

    // Forward the modified request (optionally through the configured proxy).
    const proxyAgentInfo = getNextProxyAgent(endpoint.base_url);
    const fetchOptions = {
      method: 'POST',
      headers,
      body: JSON.stringify(modifiedRequest)
    };

    if (proxyAgentInfo?.agent) {
      fetchOptions.agent = proxyAgentInfo.agent;
    }

    const response = await fetch(endpoint.base_url, fetchOptions);

    logInfo(`响应状态: ${response.status}`);

    if (!response.ok) {
      const errorText = await response.text();
      logError(`端点错误: ${response.status}`, new Error(errorText));
      return res.status(response.status).json({
        error: `端点返回 ${response.status}`,
        details: errorText
      });
    }

    if (isStreaming) {
      // Raw SSE pass-through — no transformation of upstream chunks.
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      try {
        for await (const chunk of response.body) {
          res.write(chunk);
        }
        res.end();
        logInfo('流转发成功');
      } catch (streamError) {
        logError('流错误', streamError);
        res.end();
      }
    } else {
      // Raw JSON pass-through.
      const data = await response.json();
      logResponse(200, null, data);
      res.json(data);
    }

  } catch (error) {
    logError('/v1/messages 错误', error);
    res.status(500).json({
      error: '内部服务器错误',
      message: error.message
    });
  }
}
508
+
509
// Handler for Anthropic /v1/messages/count_tokens: forwards the request
// body unchanged to the upstream count_tokens endpoint (derived from the
// configured messages base_url) and relays the JSON result.
// NOTE(review): calls logInfo, which is missing from this file's logger.js
// import list — ReferenceError at runtime; add it to the import.
async function handleCountTokens(req, res) {
  logInfo('POST /v1/messages/count_tokens');

  try {
    const anthropicRequest = req.body;
    const modelId = anthropicRequest.model;

    if (!modelId) {
      return res.status(400).json({ error: '需要提供 model 参数' });
    }

    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `未找到模型 ${modelId}` });
    }

    // This route only serves 'anthropic'-type endpoints.
    if (model.type !== 'anthropic') {
      return res.status(400).json({
        error: '无效的端点类型',
        message: `/v1/messages/count_tokens 接口只支持 anthropic 类型端点,当前模型 ${modelId} 是 ${model.type} 类型`
      });
    }

    const endpoint = getEndpointByType('anthropic');
    if (!endpoint) {
      return res.status(500).json({ error: '未找到端点类型 anthropic' });
    }

    // Authentication: Authorization header or x-api-key.
    const authHeader = getAuthHeader(req);
    if (!authHeader) {
      return res.status(401).json({
        error: '未提供认证信息',
        message: '请在请求头中提供 Authorization 或 x-api-key'
      });
    }

    // Bare x-api-key values are promoted to Bearer form for the upstream.
    const finalAuthHeader = authHeader.startsWith('Bearer ')
      ? authHeader
      : `Bearer ${authHeader}`;

    const clientHeaders = req.headers;
    // count_tokens never streams, hence isStreaming=false here.
    const headers = getAnthropicHeaders(finalAuthHeader, clientHeaders, false, modelId);

    // Derive the count_tokens URL from the configured messages endpoint.
    const countTokensUrl = endpoint.base_url.replace('/v1/messages', '/v1/messages/count_tokens');

    // Forward the original request body untouched.
    const modifiedRequest = { ...anthropicRequest };

    logInfo(`转发到 count_tokens 端点: ${countTokensUrl}`);
    logRequest('POST', countTokensUrl, headers, modifiedRequest);

    const proxyAgentInfo = getNextProxyAgent(countTokensUrl);
    const fetchOptions = {
      method: 'POST',
      headers,
      body: JSON.stringify(modifiedRequest)
    };

    if (proxyAgentInfo?.agent) {
      fetchOptions.agent = proxyAgentInfo.agent;
    }

    const response = await fetch(countTokensUrl, fetchOptions);

    logInfo(`响应状态: ${response.status}`);

    if (!response.ok) {
      const errorText = await response.text();
      logError(`计数令牌错误: ${response.status}`, new Error(errorText));
      return res.status(response.status).json({
        error: `端点返回 ${response.status}`,
        details: errorText
      });
    }

    const data = await response.json();
    logResponse(200, null, data);
    res.json(data);

  } catch (error) {
    logError('/v1/messages/count_tokens 错误', error);
    res.status(500).json({
      error: '内部服务器错误',
      message: error.message
    });
  }
}
601
+
602
// Route registration: OpenAI-compatible chat completions plus direct
// pass-through endpoints for OpenAI responses and Anthropic messages.
router.post('/v1/chat/completions', handleChatCompletions);
router.post('/v1/responses', handleDirectResponses);
router.post('/v1/messages', handleDirectMessages);
router.post('/v1/messages/count_tokens', handleCountTokens);

export default router;
server.js ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Application entry point: Express app with JSON/urlencoded parsing,
// permissive CORS, the API router, and a port-in-use-aware startup.
import express from 'express'
import { loadConfig, isDevMode, getPort } from './config.js'
import { logError } from './logger.js'
import router from './routes.js'

const app = express();

// Large body limits: proxied chat payloads can carry sizeable content.
app.use(express.json({ limit: '50mb' }));
app.use(express.urlencoded({ extended: true, limit: '50mb' }));

// Permissive CORS for all origins; OPTIONS preflights short-circuit here.
app.use((req, res, next) => {
  res.header('Access-Control-Allow-Origin', '*');
  res.header('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS');
  res.header('Access-Control-Allow-Headers', 'Content-Type, Authorization, X-API-Key, anthropic-version');

  if (req.method === 'OPTIONS') {
    return res.sendStatus(200);
  }
  next();
});

app.use(router);

// Root path redirects off-site (decorative; not part of the API surface).
app.get('/', (req, res) => {
  res.redirect('https://www.bilibili.com/video/BV1SMH5zfEwe/?spm_id_from=333.1007.tianma.1-1-1.click&vd_source=1f3b8eb28230105c578a443fa6481550')
})


// Error-handling middleware: error details only leak in dev mode.
app.use((err, req, res, next) => {
  logError('未处理的错误', err);
  res.status(500).json({
    error: '内部服务器错误',
    message: isDevMode() ? err.message : undefined
  });
});

// Startup: load config, bind the port, and print actionable guidance when
// the port is already taken (EADDRINUSE).
(async () => {
  try {
    loadConfig()
    const PORT = getPort()

    // NOTE(review): `server` is assigned but never read afterwards.
    const server = app.listen(PORT)
      .on('listening', () => {
        console.log(`服务器运行在 http://localhost:${PORT}`)
      })
      .on('error', (err) => {
        if (err.code === 'EADDRINUSE') {
          console.error(`\n${'='.repeat(80)}`);
          console.error(`错误: 端口 ${PORT} 已被占用!`);
          console.error('');
          console.error('请选择以下选项之一:');
          console.error(`  1. 停止使用端口 ${PORT} 的进程:`);
          console.error(`     lsof -ti:${PORT} | xargs kill`);
          console.error('');
          console.error('  2. 使用环境变量更改端口:');
          console.error('     export PORT=8080');
          console.error(`${'='.repeat(80)}\n`);
          process.exit(1);
        } else {
          logError('启动服务器失败', err);
          process.exit(1);
        }
      });
  } catch (error) {
    logError('启动服务器失败', error);
    process.exit(1);
  }
})();
transformers/request-anthropic.js ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { logDebug } from '../logger.js';
2
+ import { getSystemPrompt, getModelReasoning, getUserAgent } from '../config.js';
3
+
4
/**
 * Convert an OpenAI chat-completions request into Anthropic Messages format.
 * System messages are lifted into the `system` array (with the Claude Code
 * identity string rewritten), tools are mapped to input_schema form, and the
 * thinking field is driven by the model's configured reasoning level.
 */
export function transformToAnthropic(openaiRequest) {
  logDebug('将 OpenAI 请求转换为 Anthropic 格式');

  const anthropicRequest = {
    model: openaiRequest.model,
    messages: []
  };

  // Only add `stream` when the client set it explicitly.
  if (openaiRequest.stream !== undefined) {
    anthropicRequest.stream = openaiRequest.stream;
  }

  // max_tokens: prefer max_tokens, then max_completion_tokens, else 4096.
  if (openaiRequest.max_tokens) {
    anthropicRequest.max_tokens = openaiRequest.max_tokens;
  } else if (openaiRequest.max_completion_tokens) {
    anthropicRequest.max_tokens = openaiRequest.max_completion_tokens;
  } else {
    anthropicRequest.max_tokens = 4096;
  }

  // Collect system-message text separately; everything else becomes a message.
  let systemContent = [];

  if (openaiRequest.messages && Array.isArray(openaiRequest.messages)) {
    for (const msg of openaiRequest.messages) {
      // System messages feed the `system` field, not the messages array.
      if (msg.role === 'system') {
        if (typeof msg.content === 'string') {
          systemContent.push({
            type: 'text',
            text: msg.content?.replace("You are Claude Code, Anthropic's official CLI for Claude.", 'you are bot.')
          });
        } else if (Array.isArray(msg.content)) {
          for (const part of msg.content) {
            if (part.type === 'text') {
              systemContent.push({
                type: 'text',
                text: part.text?.replace("You are Claude Code, Anthropic's official CLI for Claude.", 'you are bot.')
              });
            } else {
              // Non-text system parts are carried over untouched.
              systemContent.push(part);
            }
          }
        }
        continue; // skip adding the system message to `messages`
      }

      const anthropicMsg = {
        role: msg.role,
        content: []
      };

      if (typeof msg.content === 'string') {
        anthropicMsg.content.push({
          type: 'text',
          text: msg.content
        });
      } else if (Array.isArray(msg.content)) {
        for (const part of msg.content) {
          if (part.type === 'text') {
            anthropicMsg.content.push({
              type: 'text',
              text: part.text
            });
          } else if (part.type === 'image_url') {
            // NOTE(review): the OpenAI image_url object is passed straight
            // through as `source`; Anthropic documents a structured source
            // ({type, media_type, data}) — confirm upstream accepts this.
            anthropicMsg.content.push({
              type: 'image',
              source: part.image_url
            });
          } else {
            anthropicMsg.content.push(part);
          }
        }
      }

      anthropicRequest.messages.push(anthropicMsg);
    }
  }

  // Build `system`: configured prompt first, then caller-supplied content.
  const systemPrompt = getSystemPrompt();
  if (systemPrompt || systemContent.length > 0) {
    anthropicRequest.system = [];
    if (systemPrompt) {
      anthropicRequest.system.push({
        type: 'text',
        text: systemPrompt
      });
    }
    anthropicRequest.system.push(...systemContent);
  }

  // Map OpenAI function tools to Anthropic's name/description/input_schema.
  if (openaiRequest.tools && Array.isArray(openaiRequest.tools)) {
    anthropicRequest.tools = openaiRequest.tools.map(tool => {
      if (tool.type === 'function') {
        return {
          name: tool.function.name,
          description: tool.function.description,
          input_schema: tool.function.parameters || {}
        };
      }
      return tool;
    });
  }

  // Thinking field per the model's configured reasoning level.
  const reasoningLevel = getModelReasoning(openaiRequest.model);
  if (reasoningLevel === 'auto') {
    // 'auto': carry over the caller's thinking field verbatim, if present.
    if (openaiRequest.thinking !== undefined) {
      anthropicRequest.thinking = openaiRequest.thinking;
    }
  } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
    // Explicit level: fixed token budget per level overrides the caller.
    const budgetTokens = {
      'low': 4096,
      'medium': 12288,
      'high': 24576
    };

    anthropicRequest.thinking = {
      type: 'enabled',
      budget_tokens: budgetTokens[reasoningLevel]
    };
  } else {
    // 'off' or invalid: ensure no thinking field is sent upstream.
    delete anthropicRequest.thinking;
  }

  // Pass through the remaining compatible sampling parameters.
  if (openaiRequest.temperature !== undefined) {
    anthropicRequest.temperature = openaiRequest.temperature;
  }
  if (openaiRequest.top_p !== undefined) {
    anthropicRequest.top_p = openaiRequest.top_p;
  }
  if (openaiRequest.stop !== undefined) {
    // OpenAI `stop` (string or array) → Anthropic `stop_sequences` (array).
    anthropicRequest.stop_sequences = Array.isArray(openaiRequest.stop)
      ? openaiRequest.stop
      : [openaiRequest.stop];
  }

  logDebug('已转换的 Anthropic 请求', anthropicRequest);
  return anthropicRequest;
}
156
+
157
/**
 * Build the outbound header set for Anthropic-type upstream requests:
 * auth + factory identifiers, session/message IDs (generated when the
 * client omits them), an anthropic-beta value adjusted for the model's
 * reasoning level, and Stainless SDK headers with sensible defaults.
 */
export function getAnthropicHeaders(authHeader, clientHeaders = {}, isStreaming = true, modelId = null) {
  // Generate unique IDs when the client did not supply them.
  const sessionId = clientHeaders['x-session-id'] || generateUUID();
  const messageId = clientHeaders['x-assistant-message-id'] || generateUUID();

  const headers = {
    'accept': 'application/json',
    'content-type': 'application/json',
    'anthropic-version': clientHeaders['anthropic-version'] || '2023-06-01',
    'authorization': authHeader || '',
    'x-api-key': 'placeholder',
    'x-api-provider': 'anthropic',
    'x-factory-client': 'cli',
    'x-session-id': sessionId,
    'x-assistant-message-id': messageId,
    'user-agent': getUserAgent(),
    'x-stainless-timeout': '600',
    'connection': 'keep-alive'
  }

  // Adjust the anthropic-beta header according to the reasoning config.
  const reasoningLevel = modelId ? getModelReasoning(modelId) : null;
  let betaValues = [];

  // Start from any beta values the client already sent.
  if (clientHeaders['anthropic-beta']) {
    const existingBeta = clientHeaders['anthropic-beta'];
    betaValues = existingBeta.split(',').map(v => v.trim());
  }

  // Interleaved-thinking beta flag, toggled by the reasoning level.
  const thinkingBeta = 'interleaved-thinking-2025-05-14';
  if (reasoningLevel === 'auto') {
    // 'auto': keep the client's beta values untouched.
  } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
    // Explicit level: ensure the thinking beta is present.
    if (!betaValues.includes(thinkingBeta)) {
      betaValues.push(thinkingBeta);
    }
  } else {
    // Reasoning off/invalid: strip the thinking beta if the client sent it.
    betaValues = betaValues.filter(v => v !== thinkingBeta);
  }

  // Only emit the header when there is at least one value.
  if (betaValues.length > 0) {
    headers['anthropic-beta'] = betaValues.join(', ');
  }

  // Stainless SDK headers, defaulted when the client omits them.
  const stainlessDefaults = {
    'x-stainless-arch': 'x64',
    'x-stainless-lang': 'js',
    'x-stainless-os': 'MacOS',
    'x-stainless-runtime': 'node',
    'x-stainless-retry-count': '0',
    'x-stainless-package-version': '0.57.0',
    'x-stainless-runtime-version': 'v24.3.0'
  };

  // Streaming requests advertise the stream helper method.
  if (isStreaming) {
    headers['x-stainless-helper-method'] = 'stream';
  }

  // Client-sent Stainless headers win over the defaults above.
  Object.keys(stainlessDefaults).forEach(header => {
    headers[header] = clientHeaders[header] || stainlessDefaults[header];
  });

  // Client-provided timeout overrides the 600s default set earlier.
  if (clientHeaders['x-stainless-timeout']) {
    headers['x-stainless-timeout'] = clientHeaders['x-stainless-timeout'];
  }

  return headers;
}
235
+
236
/**
 * Produce an RFC 4122 version-4 style UUID from Math.random().
 * NOTE: not cryptographically secure — suitable for correlation IDs only.
 */
function generateUUID() {
  let uuid = ''
  for (const ch of 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx') {
    if (ch !== 'x' && ch !== 'y') {
      uuid += ch
      continue
    }
    const nibble = Math.floor(Math.random() * 16)
    // 'y' positions are constrained to the 8..b variant range.
    uuid += (ch === 'x' ? nibble : (nibble & 0x3) | 0x8).toString(16)
  }
  return uuid
}
transformers/request-common.js ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { logDebug } from '../logger.js';
2
+ import { getSystemPrompt, getUserAgent } from '../config.js';
3
+
4
/**
 * Transform an OpenAI chat-completions request into the "common" upstream
 * format. The shape stays OpenAI-compatible; the only change is that the
 * configured system prompt (getSystemPrompt) is prepended — merged into the
 * first existing `system` message, or inserted as a new first message.
 *
 * @param {object} openaiRequest - incoming OpenAI-style request body
 * @returns {object} shallow copy of the request with the system prompt applied
 */
export function transformToCommon(openaiRequest) {
  logDebug('将 OpenAI 请求转换为通用格式');

  const commonRequest = { ...openaiRequest };
  const systemPrompt = getSystemPrompt();

  if (!systemPrompt) {
    logDebug('已转换的通用请求', commonRequest);
    return commonRequest;
  }

  const messages = commonRequest.messages || [];
  // Hoisted: the original re-ran findIndex inside map (O(n^2)).
  const firstSystemIndex = messages.findIndex((m) => m.role === 'system');

  if (firstSystemIndex === -1) {
    // No system message: insert ours at the front of the conversation.
    commonRequest.messages = [
      { role: 'system', content: systemPrompt },
      ...messages
    ];
  } else {
    // Merge our prompt in front of the first system message's text.
    // FIX: the original replaced array-shaped content with '' outright,
    // silently dropping the caller's system instructions; we now flatten
    // the text parts so they survive the merge.
    commonRequest.messages = messages.map((msg, index) => {
      if (index !== firstSystemIndex) {
        return msg;
      }
      let existingText = '';
      if (typeof msg.content === 'string') {
        existingText = msg.content;
      } else if (Array.isArray(msg.content)) {
        existingText = msg.content
          .filter((part) => part.type === 'text')
          .map((part) => part.text)
          .join('\n');
      }
      return {
        role: 'system',
        content:
          systemPrompt +
          existingText.replace(
            "You are Claude Code, Anthropic's official CLI for Claude.",
            'you are bot.'
          )
      };
    });
  }

  logDebug('已转换的通用请求', commonRequest);
  return commonRequest;
}
45
+
46
/**
 * Build the HTTP headers for a request to the "common" upstream endpoint.
 * Session and assistant-message IDs are taken from the client's headers
 * when present, otherwise freshly generated. Stainless SDK telemetry
 * headers are passed through from the client, with hard-coded defaults
 * used for anything missing.
 *
 * @param {string} [authHeader] - value for the `authorization` header
 * @param {object} [clientHeaders] - incoming request headers (lowercased keys)
 * @returns {object} header map for the upstream request
 */
export function getCommonHeaders(authHeader, clientHeaders = {}) {
  const headers = {
    'accept': 'application/json',
    'content-type': 'application/json',
    'authorization': authHeader || '',
    'x-api-provider': 'baseten',
    'x-factory-client': 'cli',
    'x-session-id': clientHeaders['x-session-id'] || generateUUID(),
    'x-assistant-message-id': clientHeaders['x-assistant-message-id'] || generateUUID(),
    'user-agent': getUserAgent(),
    'connection': 'keep-alive'
  };

  // Stainless SDK headers: client-supplied values win over these defaults.
  const stainlessDefaults = {
    'x-stainless-arch': 'x64',
    'x-stainless-lang': 'js',
    'x-stainless-os': 'MacOS',
    'x-stainless-runtime': 'node',
    'x-stainless-retry-count': '0',
    'x-stainless-package-version': '5.23.2',
    'x-stainless-runtime-version': 'v24.3.0'
  };
  for (const [name, fallback] of Object.entries(stainlessDefaults)) {
    headers[name] = clientHeaders[name] || fallback;
  }

  return headers;
}
81
+
82
/**
 * Generate an RFC 4122 version-4 UUID string.
 *
 * Uses the platform's cryptographically secure generator when available
 * (`globalThis.crypto.randomUUID`, Node >= 19 and modern browsers) and
 * falls back to the original Math.random() template otherwise. The
 * fallback is not cryptographically secure — fine for the session and
 * message IDs used in this module, not for secrets.
 *
 * @returns {string} lowercase v4 UUID
 */
function generateUUID() {
  // Secure path when the runtime provides WebCrypto.
  if (typeof globalThis.crypto?.randomUUID === 'function') {
    return globalThis.crypto.randomUUID();
  }
  // Fallback: classic v4 template; 'y' encodes the RFC 4122 variant bits.
  return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
    const r = Math.random() * 16 | 0;
    const v = c === 'x' ? r : (r & 0x3 | 0x8);
    return v.toString(16);
  });
}
transformers/request-openai.js ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { logDebug } from '../logger.js';
2
+ import { getSystemPrompt, getModelReasoning, getUserAgent } from '../config.js';
3
+
4
/**
 * Transform an OpenAI chat-completions request into the target
 * Responses-style format (`input` items, `instructions`, `max_output_tokens`).
 *
 * Key mappings:
 *  - messages[]  -> input[] with typed content parts (assistant parts use
 *    'output_text'/'output_image'; all other roles use 'input_text'/'input_image')
 *  - max_tokens / max_completion_tokens -> max_output_tokens
 *  - system messages -> `instructions`, prefixed with our configured prompt
 *  - reasoning   -> governed by per-model config (getModelReasoning)
 *
 * @param {object} openaiRequest - incoming OpenAI chat-completions body
 * @returns {object} request body for the Responses-style upstream API
 */
export function transformToOpenAI(openaiRequest) {
  logDebug('将 OpenAI 请求转换为目标 OpenAI 格式');

  const CLAUDE_IDENTITY = "You are Claude Code, Anthropic's official CLI for Claude.";
  // Rewrites the upstream identity string inside forwarded text.
  const scrub = (text) => text?.replace(CLAUDE_IDENTITY, 'you are bot.');

  const targetRequest = {
    model: openaiRequest.model,
    input: [],
    store: false
  };

  // Only forward `stream` when the client explicitly set it.
  if (openaiRequest.stream !== undefined) {
    targetRequest.stream = openaiRequest.stream;
  }

  // max_tokens takes precedence over max_completion_tokens.
  if (openaiRequest.max_tokens) {
    targetRequest.max_output_tokens = openaiRequest.max_tokens;
  } else if (openaiRequest.max_completion_tokens) {
    targetRequest.max_output_tokens = openaiRequest.max_completion_tokens;
  }

  // Convert chat messages into Responses-API input items.
  if (Array.isArray(openaiRequest.messages)) {
    for (const msg of openaiRequest.messages) {
      // Assistant content is "output", everything else is "input".
      const textType = msg.role === 'assistant' ? 'output_text' : 'input_text';
      const imageType = msg.role === 'assistant' ? 'output_image' : 'input_image';
      const content = [];

      if (typeof msg.content === 'string') {
        content.push({ type: textType, text: scrub(msg.content) });
      } else if (Array.isArray(msg.content)) {
        for (const part of msg.content) {
          if (part.type === 'text') {
            content.push({ type: textType, text: scrub(part.text) });
          } else if (part.type === 'image_url') {
            content.push({ type: imageType, image_url: part.image_url });
          } else {
            // Unknown part types are forwarded untouched.
            content.push(part);
          }
        }
      }

      targetRequest.input.push({ role: msg.role, content });
    }
  }

  // Tools pass through with strict mode disabled for the upstream.
  if (Array.isArray(openaiRequest.tools)) {
    targetRequest.tools = openaiRequest.tools.map((tool) => ({
      ...tool,
      strict: false
    }));
  }

  // Fold system messages into `instructions`, prefixed by our system prompt.
  // FIX: the original extracted only the FIRST system message's text but
  // stripped ALL system messages out of `input`, silently discarding any
  // additional system text. We now concatenate every system message.
  const systemPrompt = getSystemPrompt();
  const systemMessages = (openaiRequest.messages || []).filter((m) => m.role === 'system');

  if (systemMessages.length > 0) {
    const userInstructions = systemMessages
      .map((m) => {
        if (typeof m.content === 'string') {
          return m.content;
        }
        if (Array.isArray(m.content)) {
          return m.content
            .filter((p) => p.type === 'text')
            .map((p) => p.text)
            .join('\n');
        }
        return '';
      })
      .join('\n');
    targetRequest.instructions = systemPrompt + userInstructions;
    targetRequest.input = targetRequest.input.filter((m) => m.role !== 'system');
  } else if (systemPrompt) {
    // No client system message: instructions are just our prompt.
    targetRequest.instructions = systemPrompt;
  }

  // reasoning: per-model policy from config.
  const reasoningLevel = getModelReasoning(openaiRequest.model);
  if (reasoningLevel === 'auto') {
    // auto: preserve the client's reasoning field verbatim, if present.
    if (openaiRequest.reasoning !== undefined) {
      targetRequest.reasoning = openaiRequest.reasoning;
    }
  } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
    // Explicit level from model config overrides whatever the client sent.
    targetRequest.reasoning = {
      effort: reasoningLevel,
      summary: 'auto'
    };
  } else {
    // off/invalid: ensure no reasoning field survives.
    delete targetRequest.reasoning;
  }

  // Remaining sampling parameters pass straight through when present.
  for (const key of ['temperature', 'top_p', 'presence_penalty', 'frequency_penalty', 'parallel_tool_calls']) {
    if (openaiRequest[key] !== undefined) {
      targetRequest[key] = openaiRequest[key];
    }
  }

  logDebug('已转换的目标 OpenAI 请求', targetRequest);
  return targetRequest;
}
135
+
136
/**
 * Build the HTTP headers for a request to the OpenAI-style upstream.
 * Session and assistant-message IDs come from the client's headers when
 * present, otherwise they are freshly generated. Stainless SDK telemetry
 * headers are passed through from the client with defaults filling gaps.
 *
 * @param {string} [authHeader] - value for the `authorization` header
 * @param {object} [clientHeaders] - incoming request headers (lowercased keys)
 * @returns {object} header map for the upstream request
 */
export function getOpenAIHeaders(authHeader, clientHeaders = {}) {
  const headers = {
    'content-type': 'application/json',
    'authorization': authHeader || '',
    'x-api-provider': 'azure_openai',
    'x-factory-client': 'cli',
    'x-session-id': clientHeaders['x-session-id'] || generateUUID(),
    'x-assistant-message-id': clientHeaders['x-assistant-message-id'] || generateUUID(),
    'user-agent': getUserAgent(),
    'connection': 'keep-alive'
  };

  // Stainless SDK headers: client-supplied values win over these defaults.
  const stainlessDefaults = {
    'x-stainless-arch': 'x64',
    'x-stainless-lang': 'js',
    'x-stainless-os': 'MacOS',
    'x-stainless-runtime': 'node',
    'x-stainless-retry-count': '0',
    'x-stainless-package-version': '5.23.2',
    'x-stainless-runtime-version': 'v24.3.0'
  };
  for (const [name, fallback] of Object.entries(stainlessDefaults)) {
    headers[name] = clientHeaders[name] || fallback;
  }

  return headers;
}
170
+
171
/**
 * Generate an RFC 4122 version-4 UUID string.
 *
 * Uses the platform's cryptographically secure generator when available
 * (`globalThis.crypto.randomUUID`, Node >= 19 and modern browsers), and
 * falls back to the original Math.random() template otherwise. The
 * fallback is not cryptographically secure — acceptable only for the
 * session/message IDs this module builds.
 *
 * @returns {string} lowercase v4 UUID
 */
function generateUUID() {
  // Secure path when the runtime provides WebCrypto.
  if (typeof globalThis.crypto?.randomUUID === 'function') {
    return globalThis.crypto.randomUUID();
  }
  // Fallback: classic v4 template; 'y' encodes the RFC 4122 variant bits.
  return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
    const r = Math.random() * 16 | 0;
    const v = c === 'x' ? r : (r & 0x3 | 0x8);
    return v.toString(16);
  });
}
transformers/response-anthropic.js ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { logDebug } from '../logger.js';
2
+
3
/**
 * Re-emits an Anthropic Messages-API SSE stream as OpenAI
 * `chat.completion.chunk` SSE frames, ending with `data: [DONE]`.
 *
 * Only plain text deltas (`delta.text`) are forwarded; other delta shapes
 * (e.g. tool-use or thinking deltas, which do not carry `delta.text`)
 * produce an empty-content chunk — NOTE(review): presumably this proxy is
 * only used for text conversations; confirm before relying on tool calls.
 */
export class AnthropicResponseTransformer {
  /**
   * @param {string} model - model id echoed into every output chunk
   * @param {string} [requestId] - chunk id; defaults to a timestamp-based id
   */
  constructor(model, requestId) {
    this.model = model;
    this.requestId = requestId || `chatcmpl-${Date.now()}`;
    // `created` is fixed at construction so all chunks share one timestamp.
    this.created = Math.floor(Date.now() / 1000);
    this.messageId = null;      // Anthropic message id, captured at message_start
    this.currentIndex = 0;      // reserved; not otherwise read in this class
  }

  /**
   * Parse a single SSE line into `{type: 'event'|'data', value}`.
   * Non-JSON data payloads are returned as the raw string.
   * @returns {{type: string, value: *}|null} null for unrecognized lines
   */
  parseSSELine(line) {
    if (line.startsWith('event:')) {
      return { type: 'event', value: line.slice(6).trim() };
    }
    if (line.startsWith('data:')) {
      const dataStr = line.slice(5).trim();
      try {
        return { type: 'data', value: JSON.parse(dataStr) };
      } catch (e) {
        // Keep the raw string; downstream optional chaining tolerates it.
        return { type: 'data', value: dataStr };
      }
    }
    return null;
  }

  /**
   * Map one Anthropic event (+ its data payload) to an OpenAI SSE frame.
   * @returns {string|null} serialized frame, or null for events with no
   *   OpenAI equivalent (block start/stop, ping, unknown events)
   */
  transformEvent(eventType, eventData) {
    logDebug(`Anthropic 事件: ${eventType}`);

    if (eventType === 'message_start') {
      // Remember the upstream message id and open the OpenAI stream with
      // a role-only chunk (delta.role === 'assistant').
      this.messageId = eventData.message?.id || this.requestId;
      return this.createOpenAIChunk('', 'assistant', false);
    }

    if (eventType === 'content_block_start') {
      return null;
    }

    if (eventType === 'content_block_delta') {
      // Only text deltas carry `delta.text`; others become an empty chunk.
      const text = eventData.delta?.text || '';
      return this.createOpenAIChunk(text, null, false);
    }

    if (eventType === 'content_block_stop') {
      return null;
    }

    if (eventType === 'message_delta') {
      // Terminal metadata: translate Anthropic's stop_reason when present.
      const stopReason = eventData.delta?.stop_reason;
      if (stopReason) {
        return this.createOpenAIChunk('', null, true, this.mapStopReason(stopReason));
      }
      return null;
    }

    if (eventType === 'message_stop') {
      return this.createDoneSignal();
    }

    if (eventType === 'ping') {
      // Keep-alive; nothing to forward.
      return null;
    }

    return null;
  }

  /**
   * Build one serialized `chat.completion.chunk` SSE frame.
   * Empty `content` and null `role` are omitted from the delta entirely.
   * @returns {string} `data: {...}\n\n`
   */
  createOpenAIChunk(content, role = null, finish = false, finishReason = null) {
    const chunk = {
      id: this.requestId,
      object: 'chat.completion.chunk',
      created: this.created,
      model: this.model,
      choices: [
        {
          index: 0,
          delta: {},
          finish_reason: finish ? finishReason : null
        }
      ]
    };

    if (role) {
      chunk.choices[0].delta.role = role;
    }
    if (content) {
      chunk.choices[0].delta.content = content;
    }

    return `data: ${JSON.stringify(chunk)}\n\n`;
  }

  // OpenAI end-of-stream sentinel.
  createDoneSignal() {
    return 'data: [DONE]\n\n';
  }

  /**
   * Translate an Anthropic stop_reason into an OpenAI finish_reason;
   * unknown reasons default to 'stop'.
   */
  mapStopReason(anthropicReason) {
    const mapping = {
      'end_turn': 'stop',
      'max_tokens': 'length',
      'stop_sequence': 'stop',
      'tool_use': 'tool_calls'
    };
    return mapping[anthropicReason] || 'stop';
  }

  /**
   * Consume the upstream byte stream and yield transformed SSE frames.
   * Buffers partial lines across chunks; an `event:` line is paired with
   * the next `data:` line, then the pairing state is reset.
   * @param {AsyncIterable<Buffer|string>} sourceStream
   * @yields {string} OpenAI-format SSE frames
   */
  async *transformStream(sourceStream) {
    let buffer = '';
    let currentEvent = null;

    try {
      for await (const chunk of sourceStream) {
        buffer += chunk.toString();
        const lines = buffer.split('\n');
        // Last element may be a partial line: keep it for the next chunk.
        buffer = lines.pop() || '';

        for (const line of lines) {
          if (!line.trim()) continue;

          const parsed = this.parseSSELine(line);
          if (!parsed) continue;

          if (parsed.type === 'event') {
            currentEvent = parsed.value;
          } else if (parsed.type === 'data' && currentEvent) {
            const transformed = this.transformEvent(currentEvent, parsed.value);
            if (transformed) {
              yield transformed;
            }
            // One data payload per event; reset the pairing.
            currentEvent = null;
          }
        }
      }
    } catch (error) {
      logDebug('Anthropic 流转换错误', error);
      throw error;
    }
  }
}
transformers/response-openai.js ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { logDebug } from '../logger.js';
2
+
3
/**
 * Re-emits a Responses-API SSE stream as OpenAI `chat.completion.chunk`
 * SSE frames, ending with `data: [DONE]`.
 *
 * Only `response.output_text.delta` text is forwarded; other output item
 * types (tool calls, reasoning summaries) are not translated —
 * NOTE(review): presumably this proxy is text-only; confirm before
 * relying on tool-call streaming.
 */
export class OpenAIResponseTransformer {
  /**
   * @param {string} model - model id echoed into every output chunk
   * @param {string} [requestId] - chunk id; defaults to a timestamp-based id
   */
  constructor(model, requestId) {
    this.model = model;
    this.requestId = requestId || `chatcmpl-${Date.now()}`;
    // Fixed at construction so all chunks share one timestamp.
    this.created = Math.floor(Date.now() / 1000);
    // Tracks whether [DONE] was already emitted, so the end-of-stream
    // fallback in transformStream never duplicates it.
    this.doneEmitted = false;
  }

  /**
   * Parse a single SSE line into `{type: 'event'|'data', value}`.
   * Non-JSON data payloads are returned as the raw string.
   * @returns {{type: string, value: *}|null} null for unrecognized lines
   */
  parseSSELine(line) {
    if (line.startsWith('event:')) {
      return { type: 'event', value: line.slice(6).trim() };
    }
    if (line.startsWith('data:')) {
      const dataStr = line.slice(5).trim();
      try {
        return { type: 'data', value: JSON.parse(dataStr) };
      } catch (e) {
        // Keep the raw string; downstream optional chaining tolerates it.
        return { type: 'data', value: dataStr };
      }
    }
    return null;
  }

  /**
   * Map one Responses-API event (+ data payload) to an OpenAI SSE frame.
   *
   * FIX: the Responses API's terminal event is `response.completed`
   * (with `response.incomplete` / `response.failed` variants); the
   * original only handled `response.done`, so real streams never got a
   * finish_reason chunk. Both names are now accepted.
   *
   * @returns {string|null} serialized frame(s), or null when the event
   *   has no OpenAI equivalent
   */
  transformEvent(eventType, eventData) {
    logDebug(`目标 OpenAI 事件: ${eventType}`);

    if (eventType === 'response.created') {
      // Open the OpenAI stream with a role-only chunk.
      return this.createOpenAIChunk('', 'assistant', false);
    }

    if (eventType === 'response.in_progress') {
      return null;
    }

    if (eventType === 'response.output_text.delta') {
      const text = eventData.delta || eventData.text || '';
      return this.createOpenAIChunk(text, null, false);
    }

    if (eventType === 'response.output_text.done') {
      return null;
    }

    if (eventType === 'response.done' || eventType === 'response.completed') {
      // Terminal event: emit the finish chunk followed by [DONE].
      const status = eventData.response?.status;
      const finishReason = status === 'incomplete' ? 'length' : 'stop';

      const finalChunk = this.createOpenAIChunk('', null, true, finishReason);
      this.doneEmitted = true;
      return finalChunk + this.createDoneSignal();
    }

    return null;
  }

  /**
   * Build one serialized `chat.completion.chunk` SSE frame.
   * Empty `content` and null `role` are omitted from the delta entirely.
   * @returns {string} `data: {...}\n\n`
   */
  createOpenAIChunk(content, role = null, finish = false, finishReason = null) {
    const chunk = {
      id: this.requestId,
      object: 'chat.completion.chunk',
      created: this.created,
      model: this.model,
      choices: [
        {
          index: 0,
          delta: {},
          finish_reason: finish ? finishReason : null
        }
      ]
    };

    if (role) {
      chunk.choices[0].delta.role = role;
    }
    if (content) {
      chunk.choices[0].delta.content = content;
    }

    return `data: ${JSON.stringify(chunk)}\n\n`;
  }

  // OpenAI end-of-stream sentinel.
  createDoneSignal() {
    return 'data: [DONE]\n\n';
  }

  /**
   * Consume the upstream byte stream and yield transformed SSE frames.
   * Buffers partial lines across chunks; an `event:` line is paired with
   * the next `data:` line.
   *
   * FIX: the original's post-loop check could emit a second [DONE] after
   * transformEvent had already appended one for `response.done`; the
   * fallback now fires only when no terminal frame was produced.
   *
   * @param {AsyncIterable<Buffer|string>} sourceStream
   * @yields {string} OpenAI-format SSE frames
   */
  async *transformStream(sourceStream) {
    let buffer = '';
    let currentEvent = null;

    try {
      for await (const chunk of sourceStream) {
        buffer += chunk.toString();
        const lines = buffer.split('\n');
        // Last element may be a partial line: keep it for the next chunk.
        buffer = lines.pop() || '';

        for (const line of lines) {
          if (!line.trim()) continue;

          const parsed = this.parseSSELine(line);
          if (!parsed) continue;

          if (parsed.type === 'event') {
            currentEvent = parsed.value;
          } else if (parsed.type === 'data' && currentEvent) {
            const transformed = this.transformEvent(currentEvent, parsed.value);
            if (transformed) {
              yield transformed;
            }
          }
        }
      }

      // Fallback: if the stream ended without a terminal event being
      // transformed (e.g. the last data frame was unparseable), still
      // close the OpenAI stream exactly once.
      if (!this.doneEmitted) {
        yield this.createDoneSignal();
        this.doneEmitted = true;
      }
    } catch (error) {
      logDebug('OpenAI 流转换错误', error);
      throw error;
    }
  }
}