| | |
| | const { logger } = require('@librechat/data-schemas'); |
| | const { CacheKeys, ViolationTypes, ContentTypes } = require('librechat-data-provider'); |
| | const { recordUsage, checkMessageGaps } = require('~/server/services/Threads'); |
| | const { sendResponse } = require('~/server/middleware/error'); |
| | const { getConvo } = require('~/models/Conversation'); |
| | const getLogStores = require('~/cache/getLogStores'); |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
/**
 * Creates an error handler for Assistants API runs. The returned handler
 * classifies the thrown error, cancels the in-flight run when possible,
 * records token usage, backfills any missing thread messages, and sends a
 * final event (or a plain error message) back to the client.
 *
 * @param {object} params
 * @param {object} params.req - Express request; `req.user.id` identifies the user.
 * @param {object} params.res - Express response the handler writes/ends.
 * @param {() => object} params.getContext - Returns the current run context:
 *   `{ openai, run_id, endpoint, cacheKey, thread_id, completedRun, assistant_id,
 *      conversationId, parentMessageId, responseMessageId }`.
 * @param {string} [params.originPath='/assistants/chat/'] - Route label used as a log prefix.
 * @returns {(error: Error) => Promise<void>} Async error handler.
 */
const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/chat/' }) => {
  const cache = getLogStores(CacheKeys.ABORT_KEYS);

  return async (error) => {
    const {
      openai,
      run_id,
      endpoint,
      cacheKey,
      thread_id,
      completedRun,
      assistant_id,
      conversationId,
      parentMessageId,
      responseMessageId,
    } = getContext();

    const defaultErrorMessage =
      'The Assistant run failed to initialize. Try sending a message in a new conversation.';
    // Skeleton of the system message sent back to the client on failure.
    const messageData = {
      thread_id,
      assistant_id,
      conversationId,
      parentMessageId,
      sender: 'System',
      user: req.user.id,
      shouldSaveMessage: false,
      messageId: responseMessageId,
      endpoint,
    };

    // Classify known error shapes first; each known case short-circuits with
    // its own response so only unknown errors fall through to run cleanup.
    if (error.message === 'Run cancelled') {
      return res.end();
    } else if (error.message === 'Request closed' && completedRun) {
      // Client disconnected after the run finished — nothing left to do.
      return;
    } else if (error.message === 'Request closed') {
      logger.debug(`[${originPath}] Request aborted on close`);
    } else if (/Files.*are invalid/.test(error.message)) {
      const errorMessage = `Files are invalid, or may not have uploaded yet.${
        endpoint === 'azureAssistants'
          ? " If using Azure OpenAI, files are only available in the region of the assistant's model at the time of upload."
          : ''
      }`;
      return sendResponse(req, res, messageData, errorMessage);
    } else if (error?.message?.includes('string too long')) {
      return sendResponse(
        req,
        res,
        messageData,
        'Message too long. The Assistants API has a limit of 32,768 characters per message. Please shorten it and try again.',
      );
    } else if (error?.message?.includes(ViolationTypes.TOKEN_BALANCE)) {
      return sendResponse(req, res, messageData, error.message);
    } else {
      logger.error(`[${originPath}]`, error);
    }

    // Without a client/run context there is nothing to cancel or finalize.
    if (!openai || !thread_id || !run_id) {
      return sendResponse(req, res, messageData, defaultErrorMessage);
    }

    // Give any concurrent abort handler time to record its status first.
    await new Promise((resolve) => setTimeout(resolve, 2000));

    try {
      const status = await cache.get(cacheKey);
      if (status === 'cancelled') {
        logger.debug(`[${originPath}] Run already cancelled`);
        return res.end();
      }
      await cache.delete(cacheKey);
      const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
      logger.debug(`[${originPath}] Cancelled run:`, cancelledRun);
    } catch (cancelError) {
      // FIX: renamed from `error` to stop shadowing the handler's argument.
      logger.error(`[${originPath}] Error cancelling run`, cancelError);
    }

    // Let the cancellation settle before retrieving the run's final usage.
    await new Promise((resolve) => setTimeout(resolve, 2000));

    let run;
    try {
      run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
      await recordUsage({
        ...run.usage,
        model: run.model,
        user: req.user.id,
        conversationId,
      });
    } catch (usageError) {
      // Best effort: usage recording failure should not block the error response.
      logger.error(`[${originPath}] Error fetching or processing run`, usageError);
    }

    let finalEvent;
    try {
      const runMessages = await checkMessageGaps({
        openai,
        run_id,
        endpoint,
        thread_id,
        conversationId,
        latestMessageId: responseMessageId,
      });

      const errorContentPart = {
        text: {
          value:
            error?.message ?? 'There was an error processing your request. Please try again later.',
        },
        type: ContentTypes.ERROR,
      };

      const lastMessage = runMessages[runMessages.length - 1];
      if (lastMessage == null) {
        // FIX: an empty `runMessages` previously threw an accidental TypeError
        // on `runMessages[-1].content`; throw explicitly so the catch below
        // still sends the same 'The Assistant run failed' response.
        throw new Error('No run messages found to attach the error content to');
      }

      if (!Array.isArray(lastMessage.content)) {
        lastMessage.content = [errorContentPart];
      } else {
        const contentParts = lastMessage.content;
        for (let i = 0; i < contentParts.length; i++) {
          const currentPart = contentParts[i];
          const toolCall = currentPart?.[ContentTypes.TOOL_CALL];
          // FIX: simplified the redundant `!(output || output?.length)` check —
          // the `.length` operand could never be truthy when `output` was falsy.
          // Mark any tool call left without output so the client is not stuck pending.
          if (toolCall?.function && !toolCall.function.output) {
            contentParts[i] = {
              ...currentPart,
              [ContentTypes.TOOL_CALL]: {
                ...toolCall,
                function: {
                  ...toolCall.function,
                  output: 'error processing tool',
                },
              },
            };
          }
        }
        contentParts.push(errorContentPart);
      }

      finalEvent = {
        final: true,
        conversation: await getConvo(req.user.id, conversationId),
        runMessages,
      };
    } catch (finalizeError) {
      logger.error(`[${originPath}] Error finalizing error process`, finalizeError);
      return sendResponse(req, res, messageData, 'The Assistant run failed');
    }

    return sendResponse(req, res, finalEvent);
  };
};
| |
|
| | module.exports = { createErrorHandler }; |
| |
|