| const { isUserProvided, getOpenAIConfig, getCustomEndpointConfig } = require('@librechat/api'); | |
| const { | |
| CacheKeys, | |
| ErrorTypes, | |
| envVarRegex, | |
| FetchTokenConfig, | |
| extractEnvVariable, | |
| } = require('librechat-data-provider'); | |
| const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService'); | |
| const { fetchModels } = require('~/server/services/ModelService'); | |
| const OpenAIClient = require('~/app/clients/OpenAIClient'); | |
| const getLogStores = require('~/cache/getLogStores'); | |
| const { PROXY } = process.env; | |
/**
 * Initializes an OpenAI-compatible client (or raw LLM config) for a custom endpoint.
 *
 * Resolves the endpoint's API key / base URL from the app config or from
 * user-provided key values, optionally fetches and caches the endpoint's
 * token configuration, and then either returns LLM options (`optionsOnly`)
 * or a constructed `OpenAIClient`.
 *
 * @param {object} params
 * @param {object} params.req - Express request; `req.config` holds the app config,
 *   `req.body.key` an optional user-key expiry, `req.user.id` the user id.
 * @param {object} params.res - Express response (forwarded to the client options).
 * @param {object} [params.endpointOption] - Per-request endpoint options; its
 *   `model_parameters` seed `modelOptions` when `optionsOnly` is set.
 * @param {boolean} [params.optionsOnly] - When true, return LLM config options
 *   instead of instantiating a client.
 * @param {string} [params.overrideEndpoint] - Endpoint name to use instead of
 *   `req.body.endpoint`.
 * @returns {Promise<object>} Either the LLM options object (when `optionsOnly`)
 *   or `{ client, openAIApiKey }`.
 * @throws {Error} When the endpoint config is missing, env placeholders are
 *   unresolved, or a required user-provided key/URL is absent
 *   (`ErrorTypes.NO_USER_KEY` / `ErrorTypes.NO_BASE_URL` as JSON messages).
 */
const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrideEndpoint }) => {
  const appConfig = req.config;
  const { key: expiresAt } = req.body;
  const endpoint = overrideEndpoint ?? req.body.endpoint;

  const endpointConfig = getCustomEndpointConfig({
    endpoint,
    appConfig,
  });
  if (!endpointConfig) {
    throw new Error(`Config not found for the ${endpoint} custom endpoint.`);
  }

  const CUSTOM_API_KEY = extractEnvVariable(endpointConfig.apiKey);
  const CUSTOM_BASE_URL = extractEnvVariable(endpointConfig.baseURL);

  // A value still matching `envVarRegex` means the `${VAR}` placeholder was not
  // resolved, i.e. the environment variable is not set.
  if (CUSTOM_API_KEY.match(envVarRegex)) {
    throw new Error(`Missing API Key for ${endpoint}.`);
  }
  if (CUSTOM_BASE_URL.match(envVarRegex)) {
    throw new Error(`Missing Base URL for ${endpoint}.`);
  }

  const userProvidesKey = isUserProvided(CUSTOM_API_KEY);
  const userProvidesURL = isUserProvided(CUSTOM_BASE_URL);

  let userValues = null;
  if (expiresAt && (userProvidesKey || userProvidesURL)) {
    checkUserKeyExpiry(expiresAt, endpoint);
    userValues = await getUserKeyValues({ userId: req.user.id, name: endpoint });
  }

  const apiKey = userProvidesKey ? userValues?.apiKey : CUSTOM_API_KEY;
  const baseURL = userProvidesURL ? userValues?.baseURL : CUSTOM_BASE_URL;

  // Fix: was `userProvidesKey & !apiKey` (bitwise AND); use logical `&&`
  // to match the baseURL check below.
  if (userProvidesKey && !apiKey) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_USER_KEY,
      }),
    );
  }
  if (userProvidesURL && !baseURL) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_BASE_URL,
      }),
    );
  }

  if (!apiKey) {
    throw new Error(`${endpoint} API key not provided.`);
  }
  if (!baseURL) {
    throw new Error(`${endpoint} Base URL not provided.`);
  }

  const cache = getLogStores(CacheKeys.TOKEN_CONFIG);
  // User-provided credentials get a per-user cache key so users don't share
  // token configs fetched with different keys/URLs.
  const tokenKey =
    !endpointConfig.tokenConfig && (userProvidesKey || userProvidesURL)
      ? `${endpoint}:${req.user.id}`
      : endpoint;

  let endpointTokenConfig =
    !endpointConfig.tokenConfig &&
    FetchTokenConfig[endpoint.toLowerCase()] &&
    (await cache.get(tokenKey));

  // Fetching models populates the token-config cache as a side effect.
  if (
    FetchTokenConfig[endpoint.toLowerCase()] &&
    endpointConfig &&
    endpointConfig.models.fetch &&
    !endpointTokenConfig
  ) {
    await fetchModels({ apiKey, baseURL, name: endpoint, user: req.user.id, tokenKey });
    endpointTokenConfig = await cache.get(tokenKey);
  }

  const customOptions = {
    headers: endpointConfig.headers,
    addParams: endpointConfig.addParams,
    dropParams: endpointConfig.dropParams,
    customParams: endpointConfig.customParams,
    titleConvo: endpointConfig.titleConvo,
    titleModel: endpointConfig.titleModel,
    forcePrompt: endpointConfig.forcePrompt,
    summaryModel: endpointConfig.summaryModel,
    modelDisplayLabel: endpointConfig.modelDisplayLabel,
    titleMethod: endpointConfig.titleMethod ?? 'completion',
    contextStrategy: endpointConfig.summarize ? 'summarize' : null,
    directEndpoint: endpointConfig.directEndpoint,
    titleMessageRole: endpointConfig.titleMessageRole,
    streamRate: endpointConfig.streamRate,
    endpointTokenConfig,
  };

  // The `all` endpoint config overrides per-endpoint stream rates.
  const allConfig = appConfig.endpoints?.all;
  if (allConfig) {
    customOptions.streamRate = allConfig.streamRate;
  }

  let clientOptions = {
    reverseProxyUrl: baseURL ?? null,
    proxy: PROXY ?? null,
    req,
    res,
    ...customOptions,
    ...endpointOption,
  };

  if (optionsOnly) {
    const modelOptions = endpointOption?.model_parameters ?? {};
    clientOptions = Object.assign(
      {
        modelOptions,
      },
      clientOptions,
    );
    clientOptions.modelOptions.user = req.user.id;
    const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
    if (options != null) {
      options.useLegacyContent = true;
      options.endpointTokenConfig = endpointTokenConfig;
      // Fix: only attach the stream delay when options exist; previously a
      // nullish `options` with a truthy streamRate caused a TypeError here.
      if (clientOptions.streamRate) {
        options.llmConfig._lc_stream_delay = clientOptions.streamRate;
      }
    }
    return options;
  }

  const client = new OpenAIClient(apiKey, clientOptions);
  return {
    client,
    openAIApiKey: apiKey,
  };
};

module.exports = initializeClient;