const axios = require('axios');
const { logger } = require('@librechat/data-schemas');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { logAxiosError, inputSchema, processModelData, isUserProvided } = require('@librechat/api');
const {
  CacheKeys,
  defaultModels,
  KnownEndpoints,
  EModelEndpoint,
} = require('librechat-data-provider');
const { OllamaClient } = require('~/app/clients/OllamaClient');
const getLogStores = require('~/cache/getLogStores');
const { extractBaseURL } = require('~/utils');

/**
 * Splits a comma-separated string and trims each value, dropping empty entries.
 * @param {string} input - The comma-separated string to split.
 * @returns {string[]} An array of trimmed, non-empty strings.
 */
const splitAndTrim = (input) => {
  if (!input || typeof input !== 'string') {
    return [];
  }
  return input
    .split(',')
    .map((item) => item.trim())
    .filter(Boolean);
};

const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService').config;

/**
 * Fetches model identifiers from the given API endpoint.
 * @param {Object} params - The parameters for fetching the models.
 * @param {string} params.user - The user ID to send to the API.
 * @param {string} params.apiKey - The API key for authentication.
 * @param {string} params.baseURL - The base URL of the API.
 * @param {string} [params.name] - The endpoint name; defaults to `EModelEndpoint.openAI`.
 * @param {boolean} [params.direct] - Whether to extract the base URL from a reverse-proxy value.
 * @param {boolean} [params.azure=false] - Whether the request targets an Azure deployment.
 * @param {boolean} [params.userIdQuery=false] - Whether to send the user ID as a query parameter.
 * @param {boolean} [params.createTokenConfig=true] - Whether to create and cache a token configuration from the response.
 * @param {string} [params.tokenKey] - The cache key for the token configuration; defaults to `name`.
 * @param {Object} [params.headers] - Additional headers to send with the request.
 * @param {Object} [params.userObject] - The user object, forwarded to the Ollama client.
 * @returns {Promise<string[]>} A promise that resolves to an array of model identifiers.
 */
const fetchModels = async ({
  user,
  apiKey,
  baseURL: _baseURL,
  name = EModelEndpoint.openAI,
  direct,
  azure = false,
  userIdQuery = false,
  createTokenConfig = true,
  tokenKey,
  headers,
  userObject,
}) => {
  let models = [];
  const baseURL = direct ? extractBaseURL(_baseURL) : _baseURL;

  if (!baseURL && !azure) {
    return models;
  }

  if (!apiKey) {
    return models;
  }

  if (name && name.toLowerCase().startsWith(KnownEndpoints.ollama)) {
    try {
      return await OllamaClient.fetchModels(baseURL, { headers, user: userObject });
    } catch (ollamaError) {
      const logMessage =
        'Failed to fetch models from Ollama API. Attempting to fetch via OpenAI-compatible endpoint.';
      logAxiosError({ message: logMessage, error: ollamaError });
    }
  }

  try {
    const options = {
      headers: {
        ...(headers ?? {}),
      },
      timeout: 5000,
    };

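    // Anthropic authenticates with an `x-api-key` header and an API version
    // rather than an OpenAI-style Bearer token.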
    if (name === EModelEndpoint.anthropic) {
      options.headers = {
        'x-api-key': apiKey,
        'anthropic-version': process.env.ANTHROPIC_VERSION || '2023-06-01',
      };
    } else {
      options.headers.Authorization = `Bearer ${apiKey}`;
    }

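    // Route outbound requests through an HTTPS proxy when one is configured.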
    if (process.env.PROXY) {
      options.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
    }

    if (process.env.OPENAI_ORGANIZATION && baseURL.includes('openai')) {
      options.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
    }

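    // Azure callers pass the full models URL as `baseURL`; for everyone else,
    // `/models` is appended to the (trailing-slash-stripped) base URL.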
    const url = new URL(`${baseURL.replace(/\/+$/, '')}${azure ? '' : '/models'}`);
    if (user && userIdQuery) {
      url.searchParams.append('user', user);
    }
    const res = await axios.get(url.toString(), options);

    /** @type {z.infer<typeof inputSchema>} */
    const input = res.data;

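    // When the response matches the expected schema, derive a token/pricing
    // configuration from the model data and cache it for the endpoint.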
    const validationResult = inputSchema.safeParse(input);
    if (validationResult.success && createTokenConfig) {
      const endpointTokenConfig = processModelData(input);
      const cache = getLogStores(CacheKeys.TOKEN_CONFIG);
      await cache.set(tokenKey ?? name, endpointTokenConfig);
    }
    models = input.data.map((item) => item.id);
  } catch (error) {
    const logMessage = `Failed to fetch models from ${azure ? 'Azure ' : ''}${name} API`;
    logAxiosError({ message: logMessage, error });
  }

  return models;
};

/**
 * Fetches models from the OpenAI API or a configured reverse proxy / Azure endpoint.
 * @param {object} opts - The options for fetching the models.
 * @param {string} opts.user - The user ID to send to the API.
 * @param {boolean} [opts.azure=false] - Whether the endpoint targets Azure OpenAI.
 * @param {boolean} [opts.assistants=false] - Whether to fetch models for the Assistants API.
 * @param {string[]} [_models=[]] - The fallback models to return if the fetch fails or is skipped.
 * @returns {Promise<string[]>} A promise that resolves to an array of model identifiers.
 */
const fetchOpenAIModels = async (opts, _models = []) => {
  let models = _models.slice();
  const apiKey = openAIApiKey;
  const openaiBaseURL = 'https://api.openai.com/v1';
  let baseURL = openaiBaseURL;
  let reverseProxyUrl = process.env.OPENAI_REVERSE_PROXY;

  if (opts.assistants && process.env.ASSISTANTS_BASE_URL) {
    reverseProxyUrl = process.env.ASSISTANTS_BASE_URL;
  } else if (opts.azure) {
    return models;
  }

  if (reverseProxyUrl) {
    baseURL = extractBaseURL(reverseProxyUrl);
  }

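  // Fetched model lists are cached per base URL to avoid repeated API calls.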
  const modelsCache = getLogStores(CacheKeys.MODEL_QUERIES);

  const cachedModels = await modelsCache.get(baseURL);
  if (cachedModels) {
    return cachedModels;
  }

  if (baseURL || opts.azure) {
    models = await fetchModels({
      apiKey,
      baseURL,
      azure: opts.azure,
      user: opts.user,
      name: EModelEndpoint.openAI,
    });
  }

  if (models.length === 0) {
    return _models;
  }

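  // When talking to the official OpenAI API, keep only known completion-capable
  // model families, drop audio/realtime variants, and move `instruct` models
  // to the end of the list.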
  if (baseURL === openaiBaseURL) {
    const regex = /(text-davinci-003|gpt-|o\d+)/;
    const excludeRegex = /audio|realtime/;
    models = models.filter((model) => regex.test(model) && !excludeRegex.test(model));
    const instructModels = models.filter((model) => model.includes('instruct'));
    const otherModels = models.filter((model) => !model.includes('instruct'));
    models = otherModels.concat(instructModels);
  }

  await modelsCache.set(baseURL, models);
  return models;
};

/**
 * Loads the default models for the requested OpenAI-family endpoint, applying
 * environment-variable overrides before falling back to fetching from the API.
 * @param {object} opts - The options for fetching the models.
 * @param {string} opts.user - The user ID to send to the API.
 * @param {boolean} [opts.azure=false] - Whether to load models for Azure OpenAI.
 * @param {boolean} [opts.plugins=false] - Whether to load models for the plugins endpoint.
 * @param {boolean} [opts.assistants=false] - Whether to load models for the Assistants API.
 * @returns {Promise<string[]>} A promise that resolves to an array of model identifiers.
 */
const getOpenAIModels = async (opts) => {
  let models = defaultModels[EModelEndpoint.openAI];

  if (opts.assistants) {
    models = defaultModels[EModelEndpoint.assistants];
  } else if (opts.azure) {
    models = defaultModels[EModelEndpoint.azureAssistants];
  }

  if (opts.plugins) {
    models = models.filter(
      (model) =>
        !model.includes('text-davinci') &&
        !model.includes('instruct') &&
        !model.includes('0613') &&
        !model.includes('0314') &&
        !model.includes('0301'),
    );
  }

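  // Each endpoint variant can override its model list via an environment variable.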
  let key;
  if (opts.assistants) {
    key = 'ASSISTANTS_MODELS';
  } else if (opts.azure) {
    key = 'AZURE_OPENAI_MODELS';
  } else if (opts.plugins) {
    key = 'PLUGIN_MODELS';
  } else {
    key = 'OPENAI_MODELS';
  }

  if (process.env[key]) {
    models = splitAndTrim(process.env[key]);
    return models;
  }

  if (userProvidedOpenAI) {
    return models;
  }

  return await fetchOpenAIModels(opts, models);
};

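/**
 * Loads the models for the ChatGPT browser endpoint, optionally overridden
 * by the `CHATGPT_MODELS` environment variable.
 * @returns {string[]} An array of model identifiers.
 */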
const getChatGPTBrowserModels = () => {
  let models = ['text-davinci-002-render-sha', 'gpt-4'];
  if (process.env.CHATGPT_MODELS) {
    models = splitAndTrim(process.env.CHATGPT_MODELS);
  }

  return models;
};

/**
 * Fetches models from the Anthropic API, caching results per base URL.
 * @param {object} opts - The options for fetching the models.
 * @param {string} opts.user - The user ID to send to the API.
 * @param {string[]} [_models=[]] - The fallback models to return if the fetch fails or is skipped.
 * @returns {Promise<string[]>} A promise that resolves to an array of model identifiers.
 */
const fetchAnthropicModels = async (opts, _models = []) => {
  let models = _models.slice();
  const apiKey = process.env.ANTHROPIC_API_KEY;
  const anthropicBaseURL = 'https://api.anthropic.com/v1';
  let baseURL = anthropicBaseURL;
  const reverseProxyUrl = process.env.ANTHROPIC_REVERSE_PROXY;

  if (reverseProxyUrl) {
    baseURL = extractBaseURL(reverseProxyUrl);
  }

  if (!apiKey) {
    return models;
  }

  const modelsCache = getLogStores(CacheKeys.MODEL_QUERIES);

  const cachedModels = await modelsCache.get(baseURL);
  if (cachedModels) {
    return cachedModels;
  }

  if (baseURL) {
    models = await fetchModels({
      apiKey,
      baseURL,
      user: opts.user,
      name: EModelEndpoint.anthropic,
      tokenKey: EModelEndpoint.anthropic,
    });
  }

  if (models.length === 0) {
    return _models;
  }

  await modelsCache.set(baseURL, models);
  return models;
};

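/**
 * Loads the default Anthropic models, preferring the `ANTHROPIC_MODELS`
 * environment variable, then the API (unless the key is user-provided).
 * @param {object} [opts={}] - The options for fetching the models.
 * @param {string} [opts.user] - The user ID to send to the API.
 * @returns {Promise<string[]>} A promise that resolves to an array of model identifiers.
 */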
const getAnthropicModels = async (opts = {}) => {
  let models = defaultModels[EModelEndpoint.anthropic];
  if (process.env.ANTHROPIC_MODELS) {
    models = splitAndTrim(process.env.ANTHROPIC_MODELS);
    return models;
  }

  if (isUserProvided(process.env.ANTHROPIC_API_KEY)) {
    return models;
  }

  try {
    return await fetchAnthropicModels(opts, models);
  } catch (error) {
    logger.error('Error fetching Anthropic models:', error);
    return models;
  }
};

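/**
 * Loads the default Google models, optionally overridden by the
 * `GOOGLE_MODELS` environment variable.
 * @returns {string[]} An array of model identifiers.
 */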
const getGoogleModels = () => {
  let models = defaultModels[EModelEndpoint.google];
  if (process.env.GOOGLE_MODELS) {
    models = splitAndTrim(process.env.GOOGLE_MODELS);
  }

  return models;
};

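/**
 * Loads the default AWS Bedrock models, optionally overridden by the
 * `BEDROCK_AWS_MODELS` environment variable.
 * @returns {string[]} An array of model identifiers.
 */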
const getBedrockModels = () => {
  let models = defaultModels[EModelEndpoint.bedrock];
  if (process.env.BEDROCK_AWS_MODELS) {
    models = splitAndTrim(process.env.BEDROCK_AWS_MODELS);
  }

  return models;
};

module.exports = {
  fetchModels,
  splitAndTrim,
  getOpenAIModels,
  getBedrockModels,
  getChatGPTBrowserModels,
  getAnthropicModels,
  getGoogleModels,
};