| id | text | title |
|---|---|---|
144596
|
import {
BaseRetriever,
type BaseRetrieverInput,
type BaseRetrieverInterface,
} from "@langchain/core/retrievers";
import type { DocumentInterface } from "@langchain/core/documents";
import { CallbackManagerForRetrieverRun } from "@langchain/core/callbacks/manager";
import { BaseDocumentCompressor } from "./document_compressors/index.js";
/**
* Interface for the arguments required to construct a
* ContextualCompressionRetriever. It extends the BaseRetrieverInput
* interface with two additional fields: baseCompressor and baseRetriever.
*/
export interface ContextualCompressionRetrieverArgs extends BaseRetrieverInput {
baseCompressor: BaseDocumentCompressor;
baseRetriever: BaseRetrieverInterface;
}
/**
* A retriever that wraps a base retriever and compresses the results. It
* retrieves relevant documents based on a given query and then compresses
* these documents using a specified document compressor.
* @example
* ```typescript
* const retriever = new ContextualCompressionRetriever({
* baseCompressor: new LLMChainExtractor(),
* baseRetriever: new HNSWLib().asRetriever(),
* });
* const retrievedDocs = await retriever.getRelevantDocuments(
* "What did the speaker say about Justice Breyer?",
* );
* ```
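*
* A minimal end-to-end sketch is below; the import paths, vector store, and
* compressor choice are assumptions for illustration:
* ```typescript
* import { OpenAIEmbeddings } from "@langchain/openai";
* import { MemoryVectorStore } from "langchain/vectorstores/memory";
* import { EmbeddingsFilter } from "langchain/retrievers/document_compressors/embeddings_filter";
*
* const embeddings = new OpenAIEmbeddings();
* const vectorStore = await MemoryVectorStore.fromTexts(
*   ["mitochondria are the powerhouse of the cell", "cats sleep a lot"],
*   [{}, {}],
*   embeddings
* );
* const compressionRetriever = new ContextualCompressionRetriever({
*   baseCompressor: new EmbeddingsFilter({ embeddings, similarityThreshold: 0.3 }),
*   baseRetriever: vectorStore.asRetriever(),
* });
* const docs = await compressionRetriever.getRelevantDocuments(
*   "What are mitochondria?"
* );
* ```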
*/
export class ContextualCompressionRetriever extends BaseRetriever {
static lc_name() {
return "ContextualCompressionRetriever";
}
lc_namespace = ["langchain", "retrievers", "contextual_compression"];
baseCompressor: BaseDocumentCompressor;
baseRetriever: BaseRetrieverInterface;
constructor(fields: ContextualCompressionRetrieverArgs) {
super(fields);
this.baseCompressor = fields.baseCompressor;
this.baseRetriever = fields.baseRetriever;
}
async _getRelevantDocuments(
query: string,
runManager?: CallbackManagerForRetrieverRun
): Promise<DocumentInterface[]> {
const docs = await this.baseRetriever.getRelevantDocuments(
query,
runManager?.getChild("base_retriever")
);
const compressedDocs = await this.baseCompressor.compressDocuments(
docs,
query,
runManager?.getChild("base_compressor")
);
return compressedDocs;
}
}
| |
144612
|
import type { EmbeddingsInterface } from "@langchain/core/embeddings";
import type { DocumentInterface } from "@langchain/core/documents";
import { cosineSimilarity } from "@langchain/core/utils/math";
import { BaseDocumentCompressor } from "./index.js";
/**
* Interface for the parameters of the `EmbeddingsFilter` class.
*/
export interface EmbeddingsFilterParams {
embeddings: EmbeddingsInterface;
similarityFn?: (x: number[][], y: number[][]) => number[][];
similarityThreshold?: number;
k?: number;
}
/**
* Class that represents a document compressor that uses embeddings to
* drop documents unrelated to the query.
* @example
* ```typescript
* const embeddingsFilter = new EmbeddingsFilter({
* embeddings: new OpenAIEmbeddings(),
* similarityThreshold: 0.8,
* k: 5,
* });
* const retrievedDocs = await embeddingsFilter.compressDocuments(
* getDocuments(),
* "What did the speaker say about Justice Breyer in the 2022 State of the Union?",
* );
* console.log({ retrievedDocs });
* ```
*/
export class EmbeddingsFilter extends BaseDocumentCompressor {
/**
* Embeddings to use for embedding document contents and queries.
*/
embeddings: EmbeddingsInterface;
/**
* Similarity function for comparing documents.
*/
similarityFn = cosineSimilarity;
/**
* Threshold a document's similarity to the query must exceed for the
* document to be kept. Must be specified if `k` is not set.
*/
similarityThreshold?: number;
/**
* The number of relevant documents to return. Can be explicitly set to `undefined`,
* in which case `similarityThreshold` must be specified. Defaults to 20.
*/
k? = 20;
constructor(params: EmbeddingsFilterParams) {
super();
this.embeddings = params.embeddings;
this.similarityFn = params.similarityFn ?? this.similarityFn;
this.similarityThreshold = params.similarityThreshold;
this.k = "k" in params ? params.k : this.k;
if (this.k === undefined && this.similarityThreshold === undefined) {
throw new Error(`Must specify one of "k" or "similarity_threshold".`);
}
}
async compressDocuments(
documents: DocumentInterface[],
query: string
): Promise<DocumentInterface[]> {
const embeddedDocuments = await this.embeddings.embedDocuments(
documents.map((doc) => doc.pageContent)
);
const embeddedQuery = await this.embeddings.embedQuery(query);
const similarity = this.similarityFn([embeddedQuery], embeddedDocuments)[0];
let includedIdxs = Array.from(
{ length: embeddedDocuments.length },
(_, i) => i
);
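// Rank document indexes by similarity to the query and keep only the top `k`.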
if (this.k !== undefined) {
includedIdxs = includedIdxs
.map((v, i) => [similarity[i], v])
.sort(([a], [b]) => b - a)
.slice(0, this.k)
.map(([, i]) => i);
}
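// Drop any remaining document whose similarity does not exceed the threshold.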
if (this.similarityThreshold !== undefined) {
const threshold = this.similarityThreshold;
includedIdxs = includedIdxs.filter((i) => similarity[i] > threshold);
}
return includedIdxs.map((i) => documents[i]);
}
}
| |
144692
|
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import {
BaseMessage,
SystemMessage,
getBufferString,
} from "@langchain/core/messages";
import { BasePromptTemplate } from "@langchain/core/prompts";
import {
InputValues,
MemoryVariables,
OutputValues,
} from "@langchain/core/memory";
import { LLMChain } from "../chains/llm_chain.js";
import { SUMMARY_PROMPT } from "./prompt.js";
import { BaseChatMemory, BaseChatMemoryInput } from "./chat_memory.js";
/**
* Interface for the input parameters of the ConversationSummaryMemory
* class.
*/
export interface ConversationSummaryMemoryInput
extends BaseConversationSummaryMemoryInput {}
/**
* Interface for the input parameters of the BaseConversationSummaryMemory
* class.
*/
export interface BaseConversationSummaryMemoryInput
extends BaseChatMemoryInput {
llm: BaseLanguageModelInterface;
memoryKey?: string;
humanPrefix?: string;
aiPrefix?: string;
prompt?: BasePromptTemplate;
summaryChatMessageClass?: new (content: string) => BaseMessage;
}
/**
* Abstract class that provides a structure for storing and managing the
* memory of a conversation. It includes methods for predicting a new
* summary for the conversation given the existing messages and summary.
*/
export abstract class BaseConversationSummaryMemory extends BaseChatMemory {
memoryKey = "history";
humanPrefix = "Human";
aiPrefix = "AI";
llm: BaseLanguageModelInterface;
prompt: BasePromptTemplate = SUMMARY_PROMPT;
summaryChatMessageClass: new (content: string) => BaseMessage = SystemMessage;
constructor(fields: BaseConversationSummaryMemoryInput) {
const {
returnMessages,
inputKey,
outputKey,
chatHistory,
humanPrefix,
aiPrefix,
llm,
prompt,
summaryChatMessageClass,
} = fields;
super({ returnMessages, inputKey, outputKey, chatHistory });
this.memoryKey = fields?.memoryKey ?? this.memoryKey;
this.humanPrefix = humanPrefix ?? this.humanPrefix;
this.aiPrefix = aiPrefix ?? this.aiPrefix;
this.llm = llm;
this.prompt = prompt ?? this.prompt;
this.summaryChatMessageClass =
summaryChatMessageClass ?? this.summaryChatMessageClass;
}
/**
* Predicts a new summary for the conversation given the existing messages
* and summary.
* @param messages Existing messages in the conversation.
* @param existingSummary Current summary of the conversation.
* @returns A promise that resolves to a new summary string.
*/
async predictNewSummary(
messages: BaseMessage[],
existingSummary: string
): Promise<string> {
const newLines = getBufferString(messages, this.humanPrefix, this.aiPrefix);
const chain = new LLMChain({ llm: this.llm, prompt: this.prompt });
return await chain.predict({
summary: existingSummary,
new_lines: newLines,
});
}
}
/**
* Class that provides a concrete implementation of conversation summary
* memory. It includes methods for loading memory variables, saving
* context, and clearing the memory.
* @example
* ```typescript
* const memory = new ConversationSummaryMemory({
* memoryKey: "chat_history",
* llm: new ChatOpenAI({ modelName: "gpt-3.5-turbo", temperature: 0 }),
* });
*
* const model = new ChatOpenAI();
* const prompt =
* PromptTemplate.fromTemplate(`The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
*
* Current conversation:
* {chat_history}
* Human: {input}
* AI:`);
* const chain = new LLMChain({ llm: model, prompt, memory });
*
* const res1 = await chain.call({ input: "Hi! I'm Jim." });
* console.log({ res1, memory: await memory.loadMemoryVariables({}) });
*
* const res2 = await chain.call({ input: "What's my name?" });
* console.log({ res2, memory: await memory.loadMemoryVariables({}) });
*
* ```
*/
export class ConversationSummaryMemory extends BaseConversationSummaryMemory {
buffer = "";
constructor(fields: ConversationSummaryMemoryInput) {
super(fields);
}
get memoryKeys() {
return [this.memoryKey];
}
/**
* Loads the memory variables for the conversation memory.
* @returns A promise that resolves to an object containing the memory variables.
*/
async loadMemoryVariables(_: InputValues): Promise<MemoryVariables> {
if (this.returnMessages) {
const result = {
[this.memoryKey]: [new this.summaryChatMessageClass(this.buffer)],
};
return result;
}
const result = { [this.memoryKey]: this.buffer };
return result;
}
/**
* Saves the context of the conversation memory.
* @param inputValues Input values for the conversation.
* @param outputValues Output values from the conversation.
* @returns A promise that resolves when the context has been saved.
*/
async saveContext(
inputValues: InputValues,
outputValues: OutputValues
): Promise<void> {
await super.saveContext(inputValues, outputValues);
const messages = await this.chatHistory.getMessages();
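// Fold only the latest exchange (the last two messages) into the running summary.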
this.buffer = await this.predictNewSummary(messages.slice(-2), this.buffer);
}
/**
* Clears the conversation memory.
* @returns A promise that resolves when the memory has been cleared.
*/
async clear() {
await super.clear();
this.buffer = "";
}
}
| |
144696
|
import { getBufferString } from "@langchain/core/messages";
import {
InputValues,
MemoryVariables,
OutputValues,
} from "@langchain/core/memory";
import {
BaseConversationSummaryMemory,
BaseConversationSummaryMemoryInput,
} from "./summary.js";
/**
* Interface for the input parameters of the
* ConversationSummaryBufferMemory class.
*/
export interface ConversationSummaryBufferMemoryInput
extends BaseConversationSummaryMemoryInput {
maxTokenLimit?: number;
}
/**
* Class that extends BaseConversationSummaryMemory and implements
* ConversationSummaryBufferMemoryInput. It manages the conversation
* history in a LangChain application by maintaining a buffer of chat
* messages and providing methods to load, save, prune, and clear the
* memory.
* @example
* ```typescript
* // Initialize the memory with a specific model and token limit
* const memory = new ConversationSummaryBufferMemory({
* llm: new OpenAI({ modelName: "gpt-3.5-turbo-instruct", temperature: 0 }),
* maxTokenLimit: 10,
* });
*
* // Save conversation context to memory
* await memory.saveContext({ input: "hi" }, { output: "whats up" });
* await memory.saveContext({ input: "not much you" }, { output: "not much" });
*
* // Load the conversation history from memory
* const history = await memory.loadMemoryVariables({});
* console.log({ history });
*
* // Create a chat prompt using the conversation history
* const chatPrompt = ChatPromptTemplate.fromMessages([
* SystemMessagePromptTemplate.fromTemplate(
* "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
* ),
* new MessagesPlaceholder("history"),
* HumanMessagePromptTemplate.fromTemplate("{input}"),
* ]);
*
* // Initialize the conversation chain with the model, memory, and prompt
* const chain = new ConversationChain({
* llm: new ChatOpenAI({ temperature: 0.9, verbose: true }),
* memory: memory,
* prompt: chatPrompt,
* });
* ```
*/
export class ConversationSummaryBufferMemory
extends BaseConversationSummaryMemory
implements ConversationSummaryBufferMemoryInput
{
movingSummaryBuffer = "";
maxTokenLimit = 2000;
constructor(fields: ConversationSummaryBufferMemoryInput) {
super(fields);
this.maxTokenLimit = fields?.maxTokenLimit ?? this.maxTokenLimit;
}
get memoryKeys() {
return [this.memoryKey];
}
/**
* Method that loads the chat messages from the memory and returns them as
* a string or as a list of messages, depending on the returnMessages
* property.
* @param _ InputValues object, not used in this method.
* @returns Promise that resolves with MemoryVariables object containing the loaded chat messages.
*/
async loadMemoryVariables(_?: InputValues): Promise<MemoryVariables> {
let buffer = await this.chatHistory.getMessages();
if (this.movingSummaryBuffer) {
buffer = [
new this.summaryChatMessageClass(this.movingSummaryBuffer),
...buffer,
];
}
let finalBuffer;
if (this.returnMessages) {
finalBuffer = buffer;
} else {
finalBuffer = getBufferString(buffer, this.humanPrefix, this.aiPrefix);
}
return { [this.memoryKey]: finalBuffer };
}
/**
* Method that saves the context of the conversation, including the input
* and output values, and prunes the memory if it exceeds the maximum
* token limit.
* @param inputValues InputValues object containing the input values of the conversation.
* @param outputValues OutputValues object containing the output values of the conversation.
* @returns Promise that resolves when the context is saved and the memory is pruned.
*/
async saveContext(
inputValues: InputValues,
outputValues: OutputValues
): Promise<void> {
await super.saveContext(inputValues, outputValues);
await this.prune();
}
/**
* Method that prunes the memory if the total number of tokens in the
* buffer exceeds the maxTokenLimit. It removes messages from the
* beginning of the buffer until the total number of tokens is within the
* limit.
* @returns Promise that resolves when the memory is pruned.
*/
async prune() {
// Prune buffer if it exceeds max token limit
let buffer = await this.chatHistory.getMessages();
if (this.movingSummaryBuffer) {
buffer = [
new this.summaryChatMessageClass(this.movingSummaryBuffer),
...buffer,
];
}
let currBufferLength = await this.llm.getNumTokens(
getBufferString(buffer, this.humanPrefix, this.aiPrefix)
);
if (currBufferLength > this.maxTokenLimit) {
const prunedMemory = [];
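// Pop messages from the front of the buffer until it fits within the limit,
// collecting them so they can be folded into the running summary below.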
while (currBufferLength > this.maxTokenLimit) {
const poppedMessage = buffer.shift();
if (poppedMessage) {
prunedMemory.push(poppedMessage);
currBufferLength = await this.llm.getNumTokens(
getBufferString(buffer, this.humanPrefix, this.aiPrefix)
);
}
}
this.movingSummaryBuffer = await this.predictNewSummary(
prunedMemory,
this.movingSummaryBuffer
);
}
}
/**
* Method that clears the memory and resets the movingSummaryBuffer.
* @returns Promise that resolves when the memory is cleared.
*/
async clear() {
await super.clear();
this.movingSummaryBuffer = "";
}
}
| |
144700
|
import { InputValues, MemoryVariables } from "@langchain/core/memory";
import { getBufferString } from "@langchain/core/messages";
import { BaseChatMemory, BaseChatMemoryInput } from "./chat_memory.js";
/**
* Interface for the input parameters of the `BufferMemory` class.
*/
export interface BufferMemoryInput extends BaseChatMemoryInput {
humanPrefix?: string;
aiPrefix?: string;
memoryKey?: string;
}
/**
* The `BufferMemory` class is a type of memory component used for storing
* and managing previous chat messages. It is a wrapper around
* `ChatMessageHistory` that extracts the messages into an input variable.
* This class is particularly useful in applications like chatbots where
* it is essential to remember previous interactions. Note: The memory
* instance represents the history of a single conversation. Therefore, it
* is not recommended to share the same history or memory instance between
* two different chains. If you deploy your LangChain app in a serverless
* environment, do not store memory instances in a variable, as your
* hosting provider may reset them before the next time the function is called.
* @example
* ```typescript
* // Initialize the memory to store chat history and set up the language model with a specific temperature.
* const memory = new BufferMemory({ memoryKey: "chat_history" });
* const model = new ChatOpenAI({ temperature: 0.9 });
*
* // Create a prompt template for a friendly conversation between a human and an AI.
* const prompt =
* PromptTemplate.fromTemplate(`The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
*
* Current conversation:
* {chat_history}
* Human: {input}
* AI:`);
*
* // Set up the chain with the language model, prompt, and memory.
* const chain = new LLMChain({ llm: model, prompt, memory });
*
* // Example usage of the chain to continue the conversation.
* // The `call` method sends the input to the model and returns the AI's response.
* const res = await chain.call({ input: "Hi! I'm Jim." });
* console.log({ res });
*
* ```
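*
* When `returnMessages` is set to `true`, `loadMemoryVariables` returns the raw
* message objects instead of a formatted string. A minimal sketch (import path
* assumed):
* ```typescript
* import { BufferMemory } from "langchain/memory";
*
* const messageMemory = new BufferMemory({
*   returnMessages: true,
*   memoryKey: "chat_history",
* });
* await messageMemory.saveContext({ input: "Hi! I'm Jim." }, { output: "Hi Jim!" });
* const { chat_history } = await messageMemory.loadMemoryVariables({});
* // chat_history is an array of HumanMessage/AIMessage instances.
* ```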
*/
export class BufferMemory extends BaseChatMemory implements BufferMemoryInput {
humanPrefix = "Human";
aiPrefix = "AI";
memoryKey = "history";
constructor(fields?: BufferMemoryInput) {
super({
chatHistory: fields?.chatHistory,
returnMessages: fields?.returnMessages ?? false,
inputKey: fields?.inputKey,
outputKey: fields?.outputKey,
});
this.humanPrefix = fields?.humanPrefix ?? this.humanPrefix;
this.aiPrefix = fields?.aiPrefix ?? this.aiPrefix;
this.memoryKey = fields?.memoryKey ?? this.memoryKey;
}
get memoryKeys() {
return [this.memoryKey];
}
/**
* Loads the memory variables. It takes an `InputValues` object as a
* parameter and returns a `Promise` that resolves with a
* `MemoryVariables` object.
* @param _values `InputValues` object.
* @returns A `Promise` that resolves with a `MemoryVariables` object.
*/
async loadMemoryVariables(_values: InputValues): Promise<MemoryVariables> {
const messages = await this.chatHistory.getMessages();
if (this.returnMessages) {
const result = {
[this.memoryKey]: messages,
};
return result;
}
const result = {
[this.memoryKey]: getBufferString(
messages,
this.humanPrefix,
this.aiPrefix
),
};
return result;
}
}
| |
144706
|
import { test, expect } from "@jest/globals";
import { ChatOpenAI, OpenAIChat } from "@langchain/openai";
import { SystemMessage } from "@langchain/core/messages";
import { ConversationSummaryMemory } from "../summary.js";
test("Test summary memory", async () => {
const memory = new ConversationSummaryMemory({
llm: new OpenAIChat({ modelName: "gpt-3.5-turbo", temperature: 0 }),
});
expect(await memory.loadMemoryVariables({})).toEqual({
history: "",
});
await memory.saveContext(
{ input: "How's it going?" },
{ response: "Hello! I'm doing fine. and you?" }
);
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const result2 = await memory.loadMemoryVariables({});
// console.log("result2", result2);
await memory.clear();
expect(await memory.loadMemoryVariables({})).toEqual({
history: "",
});
});
test("Test summary memory with chat model", async () => {
const memory = new ConversationSummaryMemory({
llm: new ChatOpenAI({ temperature: 0 }),
});
expect(await memory.loadMemoryVariables({})).toEqual({
history: "",
});
await memory.saveContext(
{ input: "How's it going?" },
{ response: "Hello! I'm doing fine. and you?" }
);
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const result2 = await memory.loadMemoryVariables({});
// console.log("result2", result2);
await memory.clear();
expect(await memory.loadMemoryVariables({})).toEqual({
history: "",
});
});
test("Test summary memory return messages", async () => {
const memory = new ConversationSummaryMemory({
llm: new OpenAIChat({ modelName: "gpt-3.5-turbo", temperature: 0 }),
returnMessages: true,
});
expect(await memory.loadMemoryVariables({})).toEqual({
history: [new SystemMessage("")],
});
await memory.saveContext(
{ input: "How's it going?" },
{ response: "Hello! I'm doing fine. and you?" }
);
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const result2 = await memory.loadMemoryVariables({});
// console.log("result2", result2);
await memory.clear();
expect(await memory.loadMemoryVariables({})).toEqual({
history: [new SystemMessage("")],
});
});
| |
144709
|
import { test, expect } from "@jest/globals";
import { OpenAI, ChatOpenAI } from "@langchain/openai";
import { SystemMessage } from "@langchain/core/messages";
import { ConversationSummaryBufferMemory } from "../summary_buffer.js";
test("Test summary buffer memory", async () => {
const memory = new ConversationSummaryBufferMemory({
llm: new OpenAI({ modelName: "gpt-3.5-turbo-instruct", temperature: 0 }),
maxTokenLimit: 10,
});
expect(await memory.loadMemoryVariables({})).toEqual({
history: "",
});
await memory.saveContext(
{ input: "How's it going?" },
{ response: "Hello! I'm doing fine. and you?" }
);
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const result = await memory.loadMemoryVariables({});
// console.log("result", result);
await memory.clear();
expect(await memory.loadMemoryVariables({})).toEqual({
history: "",
});
});
test("Test summary buffer memory with chat model", async () => {
const memory = new ConversationSummaryBufferMemory({
llm: new ChatOpenAI({ temperature: 0 }),
maxTokenLimit: 10,
});
expect(await memory.loadMemoryVariables({})).toEqual({
history: "",
});
await memory.saveContext(
{ input: "How's it going?" },
{ response: "Hello! I'm doing fine. and you?" }
);
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const result = await memory.loadMemoryVariables({});
// console.log("result", result);
await memory.clear();
expect(await memory.loadMemoryVariables({})).toEqual({
history: "",
});
});
test("Test summary buffer memory return messages", async () => {
const memory = new ConversationSummaryBufferMemory({
llm: new OpenAI({ modelName: "gpt-3.5-turbo-instruct", temperature: 0 }),
returnMessages: true,
maxTokenLimit: 10,
});
const exampleBuffer = "hello summary buffer";
memory.movingSummaryBuffer = exampleBuffer;
expect(await memory.loadMemoryVariables({})).toEqual({
history: [new SystemMessage(exampleBuffer)],
});
await memory.saveContext(
{ input: "How's it going?" },
{ response: "Hello! I'm doing fine. and you?" }
);
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const result = await memory.loadMemoryVariables({});
// console.log("result", result);
await memory.clear();
expect(await memory.loadMemoryVariables({})).toEqual({
history: [],
});
});
| |
144727
|
import { Document } from "@langchain/core/documents";
/**
* Given a list of documents, this util joins their page contents
* into a single string, separated by double newlines.
*
* @param documents The documents to format.
* @returns A string of the documents' page contents, separated by double newlines.
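* @example
* A minimal sketch, using the `Document` class imported above (contents are illustrative):
* ```typescript
* const docs = [
*   new Document({ pageContent: "Harrison worked at Kensho." }),
*   new Document({ pageContent: "Ankush worked at Facebook." }),
* ];
* formatDocumentsAsString(docs);
* // => "Harrison worked at Kensho.\n\nAnkush worked at Facebook."
* ```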
*/
export const formatDocumentsAsString = (documents: Document[]): string =>
documents.map((doc) => doc.pageContent).join("\n\n");
| |
144787
|
import { describe, expect, test } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import {
CharacterTextSplitter,
LatexTextSplitter,
MarkdownTextSplitter,
RecursiveCharacterTextSplitter,
TokenTextSplitter,
} from "../text_splitter.js";
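// Note: Array(length).join(char) yields (length - 1) repeated characters,
// so each generated line is one character shorter than `length`, plus "\n".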
function textLineGenerator(char: string, length: number) {
const line = new Array(length).join(char);
return `${line}\n`;
}
describe("Character text splitter", () => {
test("Test splitting by character count.", async () => {
const text = "foo bar baz 123";
const splitter = new CharacterTextSplitter({
separator: " ",
chunkSize: 7,
chunkOverlap: 3,
});
const output = await splitter.splitText(text);
const expectedOutput = ["foo bar", "bar baz", "baz 123"];
expect(output).toEqual(expectedOutput);
});
test("Test splitting by character count doesn't create empty documents.", async () => {
const text = "foo bar";
const splitter = new CharacterTextSplitter({
separator: " ",
chunkSize: 2,
chunkOverlap: 0,
});
const output = await splitter.splitText(text);
const expectedOutput = ["foo", "bar"];
expect(output).toEqual(expectedOutput);
});
test("Test splitting by character count on long words.", async () => {
const text = "foo bar baz a a";
const splitter = new CharacterTextSplitter({
separator: " ",
chunkSize: 3,
chunkOverlap: 1,
});
const output = await splitter.splitText(text);
const expectedOutput = ["foo", "bar", "baz", "a a"];
expect(output).toEqual(expectedOutput);
});
test("Test splitting by character count when shorter words are first.", async () => {
const text = "a a foo bar baz";
const splitter = new CharacterTextSplitter({
separator: " ",
chunkSize: 3,
chunkOverlap: 1,
});
const output = await splitter.splitText(text);
const expectedOutput = ["a a", "foo", "bar", "baz"];
expect(output).toEqual(expectedOutput);
});
test("Test splitting by characters when splits not found easily.", async () => {
const text = "foo bar baz 123";
const splitter = new CharacterTextSplitter({
separator: " ",
chunkSize: 1,
chunkOverlap: 0,
});
const output = await splitter.splitText(text);
const expectedOutput = ["foo", "bar", "baz", "123"];
expect(output).toEqual(expectedOutput);
});
test("Test invalid arguments.", () => {
expect(() => {
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = new CharacterTextSplitter({ chunkSize: 2, chunkOverlap: 4 });
// console.log(res);
}).toThrow();
});
test("Test create documents method.", async () => {
const texts = ["foo bar", "baz"];
const splitter = new CharacterTextSplitter({
separator: " ",
chunkSize: 3,
chunkOverlap: 0,
});
const docs = await splitter.createDocuments(texts);
const metadata = { loc: { lines: { from: 1, to: 1 } } };
const expectedDocs = [
new Document({ pageContent: "foo", metadata }),
new Document({ pageContent: "bar", metadata }),
new Document({ pageContent: "baz", metadata }),
];
expect(docs).toEqual(expectedDocs);
});
test("Test create documents with metadata method.", async () => {
const texts = ["foo bar", "baz"];
const splitter = new CharacterTextSplitter({
separator: " ",
chunkSize: 3,
chunkOverlap: 0,
});
const docs = await splitter.createDocuments(texts, [
{ source: "1" },
{ source: "2" },
]);
const loc = { lines: { from: 1, to: 1 } };
const expectedDocs = [
new Document({ pageContent: "foo", metadata: { source: "1", loc } }),
new Document({
pageContent: "bar",
metadata: { source: "1", loc },
}),
new Document({ pageContent: "baz", metadata: { source: "2", loc } }),
];
expect(docs).toEqual(expectedDocs);
});
test("Test create documents method with metadata and an added chunk header.", async () => {
const texts = ["foo bar", "baz"];
const splitter = new CharacterTextSplitter({
separator: " ",
chunkSize: 3,
chunkOverlap: 0,
});
const docs = await splitter.createDocuments(
texts,
[{ source: "1" }, { source: "2" }],
{
chunkHeader: `SOURCE NAME: testing\n-----\n`,
appendChunkOverlapHeader: true,
}
);
const loc = { lines: { from: 1, to: 1 } };
const expectedDocs = [
new Document({
pageContent: "SOURCE NAME: testing\n-----\nfoo",
metadata: { source: "1", loc },
}),
new Document({
pageContent: "SOURCE NAME: testing\n-----\n(cont'd) bar",
metadata: { source: "1", loc },
}),
new Document({
pageContent: "SOURCE NAME: testing\n-----\nbaz",
metadata: { source: "2", loc },
}),
];
expect(docs).toEqual(expectedDocs);
});
});
| |
144788
|
describe("RecursiveCharacter text splitter", () => {
test("One unique chunk", async () => {
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 100,
chunkOverlap: 0,
});
const content = textLineGenerator("A", 70);
const docs = await splitter.createDocuments([content]);
const expectedDocs = [
new Document({
pageContent: content.trim(),
metadata: { loc: { lines: { from: 1, to: 1 } } },
}),
];
expect(docs).toEqual(expectedDocs);
});
test("Test iterative text splitter.", async () => {
const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f.
This is a weird text to write, but gotta test the splittingggg some how.\n\n
Bye!\n\n-H.`;
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 10,
chunkOverlap: 1,
});
const output = await splitter.splitText(text);
const expectedOutput = [
"Hi.",
"I'm",
"Harrison.",
"How? Are?",
"You?",
"Okay then",
"f f f f.",
"This is a",
"weird",
"text to",
"write,",
"but gotta",
"test the",
"splitting",
"gggg",
"some how.",
"Bye!",
"-H.",
];
expect(output).toEqual(expectedOutput);
});
test("A basic chunked document", async () => {
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 100,
chunkOverlap: 0,
});
const line1 = textLineGenerator("A", 70);
const line2 = textLineGenerator("B", 70);
const content = line1 + line2;
const docs = await splitter.createDocuments([content]);
const expectedDocs = [
new Document({
pageContent: line1.trim(),
metadata: { loc: { lines: { from: 1, to: 1 } } },
}),
new Document({
pageContent: line2.trim(),
metadata: { loc: { lines: { from: 2, to: 2 } } },
}),
];
expect(docs).toEqual(expectedDocs);
});
test("A chunked document with similar text", async () => {
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 100,
chunkOverlap: 0,
});
const line = textLineGenerator("A", 70);
const content = line + line;
const docs = await splitter.createDocuments([content]);
const expectedDocs = [
new Document({
pageContent: line.trim(),
metadata: { loc: { lines: { from: 1, to: 1 } } },
}),
new Document({
pageContent: line.trim(),
metadata: { loc: { lines: { from: 2, to: 2 } } },
}),
];
expect(docs).toEqual(expectedDocs);
});
test("A chunked document starting with new lines", async () => {
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 100,
chunkOverlap: 0,
});
const line1 = textLineGenerator("\n", 2);
const line2 = textLineGenerator("A", 70);
const line3 = textLineGenerator("\n", 4);
const line4 = textLineGenerator("B", 70);
const line5 = textLineGenerator("\n", 4);
const content = line1 + line2 + line3 + line4 + line5;
const docs = await splitter.createDocuments([content]);
const expectedDocs = [
new Document({
pageContent: line2.trim(),
metadata: { loc: { lines: { from: 3, to: 3 } } },
}),
new Document({
pageContent: line4.trim(),
metadata: { loc: { lines: { from: 8, to: 8 } } },
}),
];
expect(docs).toEqual(expectedDocs);
});
test("A chunked with overlap", async () => {
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 100,
chunkOverlap: 30,
});
const line1 = textLineGenerator("A", 70);
const line2 = textLineGenerator("B", 20);
const line3 = textLineGenerator("C", 70);
const content = line1 + line2 + line3;
const docs = await splitter.createDocuments([content]);
const expectedDocs = [
new Document({
pageContent: line1 + line2.trim(),
metadata: { loc: { lines: { from: 1, to: 2 } } },
}),
new Document({
pageContent: line2 + line3.trim(),
metadata: { loc: { lines: { from: 2, to: 3 } } },
}),
];
expect(docs).toEqual(expectedDocs);
});
test("Chunks with overlap that contains new lines", async () => {
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 100,
chunkOverlap: 30,
});
const line1 = textLineGenerator("A", 70);
const line2 = textLineGenerator("B", 10);
const line3 = textLineGenerator("C", 10);
const line4 = textLineGenerator("D", 70);
const content = line1 + line2 + line3 + line4;
const docs = await splitter.createDocuments([content]);
const expectedDocs = [
new Document({
pageContent: line1 + line2 + line3.trim(),
metadata: { loc: { lines: { from: 1, to: 3 } } },
}),
new Document({
pageContent: line2 + line3 + line4.trim(),
metadata: { loc: { lines: { from: 2, to: 4 } } },
}),
];
expect(docs).toEqual(expectedDocs);
});
});
test("Separator length is considered correctly for chunk size", async () => {
const text = "aa ab ac ba bb";
const splitter = new RecursiveCharacterTextSplitter({
keepSeparator: false,
chunkSize: 7,
chunkOverlap: 3,
});
const output = await splitter.splitText(text);
const expectedOutput = ["aa ab", "ab ac", "ac ba", "ba bb"];
expect(output).toEqual(expectedOutput);
});
test("Token text splitter", async () => {
const text = "foo bar baz a a";
const splitter = new TokenTextSplitter({
encodingName: "r50k_base",
chunkSize: 3,
chunkOverlap: 0,
});
const output = await splitter.splitText(text);
const expectedOutput = ["foo bar b", "az a a"];
expect(output).toEqual(expectedOutput);
});
test("Token text splitter overlap when last chunk is large", async () => {
const text = "foo bar baz a a";
const splitter = new TokenTextSplitter({
encodingName: "r50k_base",
chunkSize: 5,
chunkOverlap: 3,
});
const output = await splitter.splitText(text);
const expectedOutput = ["foo bar baz a", " baz a a"];
expect(output).toEqual(expectedOutput);
});
test("Test markdown text splitter", async () => {
const text =
"# 🦜️🔗 LangChain\n" +
"\n" +
"⚡ Building applications with LLMs through composability ⚡\n" +
"\n" +
"## Quick Install\n" +
"\n" +
"```bash\n" +
"# Hopefully this code block isn't split\n" +
"pip install langchain\n" +
"```\n" +
"\n" +
"As an open source project in a rapidly developing field, we are extremely open to contributions.";
const splitter = new MarkdownTextSplitter({
chunkSize: 100,
chunkOverlap: 0,
});
const output = await splitter.splitText(text);
const expectedOutput = [
"# 🦜️🔗 LangChain\n\n⚡ Building applications with LLMs through composability ⚡",
"## Quick Install\n\n```bash\n# Hopefully this code block isn't split\npip install langchain",
"```",
"As an open source project in a rapidly developing field, we are extremely open to contributions.",
];
expect(output).toEqual(expectedOutput);
});
| |
144789
|
test("Test latex text splitter.", async () => {
const text = `\\begin{document}
\\title{🦜️🔗 LangChain}
⚡ Building applications with LLMs through composability ⚡
\\section{Quick Install}
\\begin{verbatim}
Hopefully this code block isn't split
yarn add langchain
\\end{verbatim}
As an open source project in a rapidly developing field, we are extremely open to contributions.
\\end{document}`;
const splitter = new LatexTextSplitter({
chunkSize: 100,
chunkOverlap: 0,
});
const output = await splitter.splitText(text);
const expectedOutput = [
"\\begin{document}\n\\title{🦜️🔗 LangChain}\n⚡ Building applications with LLMs through composability ⚡",
"\\section{Quick Install}",
"\\begin{verbatim}\nHopefully this code block isn't split\nyarn add langchain\n\\end{verbatim}",
"As an open source project in a rapidly developing field, we are extremely open to contributions.",
"\\end{document}",
];
expect(output).toEqual(expectedOutput);
});
test("Test HTML text splitter", async () => {
const text = `<!DOCTYPE html>
<html>
<head>
<title>🦜️🔗 LangChain</title>
<style>
body {
font-family: Arial, sans-serif;
}
h1 {
color: darkblue;
}
</style>
</head>
<body>
<div>
<h1>🦜️🔗 LangChain</h1>
<p>⚡ Building applications with LLMs through composability ⚡</p>
</div>
<div>
As an open source project in a rapidly developing field, we are extremely open to contributions.
</div>
</body>
</html>`;
const splitter = RecursiveCharacterTextSplitter.fromLanguage("html", {
chunkSize: 175,
chunkOverlap: 20,
});
const output = await splitter.splitText(text);
const expectedOutput = [
"<!DOCTYPE html>\n<html>",
"<head>\n <title>🦜️🔗 LangChain</title>",
`<style>\n body {
font-family: Arial, sans-serif;
}
h1 {
color: darkblue;
}
</style>
</head>`,
`<body>
<div>
<h1>🦜️🔗 LangChain</h1>
<p>⚡ Building applications with LLMs through composability ⚡</p>
</div>`,
`<div>
As an open source project in a rapidly developing field, we are extremely open to contributions.
</div>
</body>
</html>`,
];
expect(output).toEqual(expectedOutput);
});
test("Test lines loc on iterative text splitter.", async () => {
const text = `Hi.\nI'm Harrison.\n\nHow?\na\nb`;
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 20,
chunkOverlap: 1,
});
const docs = await splitter.createDocuments([text]);
const expectedDocs = [
new Document({
pageContent: "Hi.\nI'm Harrison.",
metadata: { loc: { lines: { from: 1, to: 2 } } },
}),
new Document({
pageContent: "How?\na\nb",
metadata: { loc: { lines: { from: 4, to: 6 } } },
}),
];
expect(docs).toEqual(expectedDocs);
});
| |
144794
|
export {
mapStoredMessagesToChatMessages,
mapChatMessagesToStoredMessages,
} from "@langchain/core/messages";
| |
144807
|
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import type {
StructuredToolInterface,
ToolInterface,
} from "@langchain/core/tools";
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { BufferMemory } from "../memory/buffer_memory.js";
import { ChatAgent } from "./chat/index.js";
import { ChatConversationalAgent } from "./chat_convo/index.js";
import { StructuredChatAgent } from "./structured_chat/index.js";
import { AgentExecutor, AgentExecutorInput } from "./executor.js";
import { ZeroShotAgent } from "./mrkl/index.js";
import { OpenAIAgent } from "./openai_functions/index.js";
import { XMLAgent } from "./xml/index.js";
/**
* Represents the type of an agent in LangChain. It can be
* "zero-shot-react-description", "chat-zero-shot-react-description", or
* "chat-conversational-react-description".
*/
type AgentType =
| "zero-shot-react-description"
| "chat-zero-shot-react-description"
| "chat-conversational-react-description";
/**
* @deprecated See {@link https://js.langchain.com/docs/modules/agents/agent_types/ | new agent creation docs}.
*/
export const initializeAgentExecutor = async (
tools: ToolInterface[],
llm: BaseLanguageModelInterface,
_agentType?: AgentType,
_verbose?: boolean,
_callbackManager?: CallbackManager
): Promise<AgentExecutor> => {
const agentType = _agentType ?? "zero-shot-react-description";
const verbose = _verbose;
const callbackManager = _callbackManager;
switch (agentType) {
case "zero-shot-react-description":
return AgentExecutor.fromAgentAndTools({
agent: ZeroShotAgent.fromLLMAndTools(llm, tools),
tools,
returnIntermediateSteps: true,
verbose,
callbackManager,
});
case "chat-zero-shot-react-description":
return AgentExecutor.fromAgentAndTools({
agent: ChatAgent.fromLLMAndTools(llm, tools),
tools,
returnIntermediateSteps: true,
verbose,
callbackManager,
});
case "chat-conversational-react-description":
return AgentExecutor.fromAgentAndTools({
agent: ChatConversationalAgent.fromLLMAndTools(llm, tools),
tools,
verbose,
callbackManager,
});
default:
throw new Error("Unknown agent type");
}
};
/**
* @interface
*/
export type InitializeAgentExecutorOptions =
| ({
agentType: "zero-shot-react-description";
agentArgs?: Parameters<typeof ZeroShotAgent.fromLLMAndTools>[2];
memory?: never;
} & Omit<AgentExecutorInput, "agent" | "tools">)
| ({
agentType: "chat-zero-shot-react-description";
agentArgs?: Parameters<typeof ChatAgent.fromLLMAndTools>[2];
memory?: never;
} & Omit<AgentExecutorInput, "agent" | "tools">)
| ({
agentType: "chat-conversational-react-description";
agentArgs?: Parameters<typeof ChatConversationalAgent.fromLLMAndTools>[2];
} & Omit<AgentExecutorInput, "agent" | "tools">)
| ({
agentType: "xml";
agentArgs?: Parameters<typeof XMLAgent.fromLLMAndTools>[2];
} & Omit<AgentExecutorInput, "agent" | "tools">);
/**
* @interface
*/
export type InitializeAgentExecutorOptionsStructured =
| ({
agentType: "structured-chat-zero-shot-react-description";
agentArgs?: Parameters<typeof StructuredChatAgent.fromLLMAndTools>[2];
} & Omit<AgentExecutorInput, "agent" | "tools">)
| ({
agentType: "openai-functions";
agentArgs?: Parameters<typeof OpenAIAgent.fromLLMAndTools>[2];
} & Omit<AgentExecutorInput, "agent" | "tools">);
/**
* Initialize an agent executor with options.
* @deprecated See {@link https://js.langchain.com/docs/modules/agents/agent_types/ | new agent creation docs}.
* @param tools Array of tools to use in the agent
* @param llm LLM or ChatModel to use in the agent
* @param options Options for the agent, including agentType, agentArgs, and other options for AgentExecutor.fromAgentAndTools
* @returns AgentExecutor
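* @example
* A minimal sketch (the tool, model, and import paths are assumptions for illustration):
* ```typescript
* import { initializeAgentExecutorWithOptions } from "langchain/agents";
* import { ChatOpenAI } from "@langchain/openai";
* import { Calculator } from "@langchain/community/tools/calculator";
*
* const executor = await initializeAgentExecutorWithOptions(
*   [new Calculator()],
*   new ChatOpenAI({ temperature: 0 }),
*   { agentType: "structured-chat-zero-shot-react-description" }
* );
* const result = await executor.invoke({ input: "What is 3 to the fourth power?" });
* ```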
*/
export async function initializeAgentExecutorWithOptions(
tools: StructuredToolInterface[],
llm: BaseLanguageModelInterface,
options: InitializeAgentExecutorOptionsStructured
): Promise<AgentExecutor>;
/** @deprecated See {@link https://js.langchain.com/docs/modules/agents/agent_types/ | new agent creation docs}. */
export async function initializeAgentExecutorWithOptions(
tools: ToolInterface[],
llm: BaseLanguageModelInterface,
options?: InitializeAgentExecutorOptions
): Promise<AgentExecutor>;
/** @deprecated See {@link https://js.langchain.com/docs/modules/agents/agent_types/ | new agent creation docs}. */
export async function initializeAgentExecutorWithOptions(
tools: StructuredToolInterface[] | ToolInterface[],
llm: BaseLanguageModelInterface,
options:
| InitializeAgentExecutorOptions
| InitializeAgentExecutorOptionsStructured = {
agentType:
llm._modelType() === "base_chat_model"
? "chat-zero-shot-react-description"
: "zero-shot-react-description",
}
): Promise<AgentExecutor> {
// Note: this tools cast is safe, as the overload signatures prevent
// the function from being called with a StructuredTool[] when
// the agentType is not in InitializeAgentExecutorOptionsStructured.
switch (options.agentType) {
case "zero-shot-react-description": {
const { agentArgs, tags, ...rest } = options;
return AgentExecutor.fromAgentAndTools({
tags: [...(tags ?? []), "zero-shot-react-description"],
agent: ZeroShotAgent.fromLLMAndTools(
llm,
tools as ToolInterface[],
agentArgs
),
tools,
...rest,
});
}
case "chat-zero-shot-react-description": {
const { agentArgs, tags, ...rest } = options;
return AgentExecutor.fromAgentAndTools({
tags: [...(tags ?? []), "chat-zero-shot-react-description"],
agent: ChatAgent.fromLLMAndTools(
llm,
tools as ToolInterface[],
agentArgs
),
tools,
...rest,
});
}
case "chat-conversational-react-description": {
const { agentArgs, memory, tags, ...rest } = options;
const executor = AgentExecutor.fromAgentAndTools({
tags: [...(tags ?? []), "chat-conversational-react-description"],
agent: ChatConversationalAgent.fromLLMAndTools(
llm,
tools as ToolInterface[],
agentArgs
),
tools,
memory:
memory ??
new BufferMemory({
returnMessages: true,
memoryKey: "chat_history",
inputKey: "input",
outputKey: "output",
}),
...rest,
});
return executor;
}
case "xml": {
const { agentArgs, tags, ...rest } = options;
const executor = AgentExecutor.fromAgentAndTools({
tags: [...(tags ?? []), "xml"],
agent: XMLAgent.fromLLMAndTools(
llm,
tools as ToolInterface[],
agentArgs
),
tools,
...rest,
});
return executor;
}
case "structured-chat-zero-shot-react-description": {
const { agentArgs, memory, tags, ...rest } = options;
const executor = AgentExecutor.fromAgentAndTools({
tags: [...(tags ?? []), "structured-chat-zero-shot-react-description"],
agent: StructuredChatAgent.fromLLMAndTools(llm, tools, agentArgs),
tools,
memory,
...rest,
});
return executor;
}
case "openai-functions": {
const { agentArgs, memory, tags, ...rest } = options;
const executor = AgentExecutor.fromAgentAndTools({
tags: [...(tags ?? []), "openai-functions"],
agent: OpenAIAgent.fromLLMAndTools(llm, tools, agentArgs),
tools,
memory:
memory ??
new BufferMemory({
returnMessages: true,
memoryKey: "chat_history",
inputKey: "input",
outputKey: "output",
}),
...rest,
});
return executor;
}
default: {
throw new Error("Unknown agent type");
}
}
}
| |
144825
|
import { OutputParserException } from "@langchain/core/output_parsers";
import { OutputParserArgs } from "../agent.js";
import { AgentActionOutputParser } from "../types.js";
import { FORMAT_INSTRUCTIONS } from "./prompt.js";
export const FINAL_ANSWER_ACTION = "Final Answer:";
/**
* A class that extends `AgentActionOutputParser` to provide a custom
* implementation for parsing the output of a ZeroShotAgent action.
*/
export class ZeroShotAgentOutputParser extends AgentActionOutputParser {
lc_namespace = ["langchain", "agents", "mrkl"];
finishToolName: string;
constructor(fields?: OutputParserArgs) {
super(fields);
this.finishToolName = fields?.finishToolName || FINAL_ANSWER_ACTION;
}
/**
* Parses the text output of an agent action, extracting the tool, tool
* input, and output.
* @param text The text output of an agent action.
* @returns An object containing the tool, tool input, and output extracted from the text, along with the original text as a log.
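* @example
* A minimal sketch of the two output shapes this parser accepts (the tool name is illustrative):
* ```typescript
* const parser = new ZeroShotAgentOutputParser();
* await parser.parse("Action: search\nAction Input: weather in SF");
* // => { tool: "search", toolInput: "weather in SF", log: "..." }
* await parser.parse("Final Answer: It is sunny.");
* // => { returnValues: { output: "It is sunny." }, log: "..." }
* ```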
*/
async parse(text: string) {
if (text.includes(this.finishToolName)) {
const parts = text.split(this.finishToolName);
const output = parts[parts.length - 1].trim();
return {
returnValues: { output },
log: text,
};
}
const match = /Action:([\s\S]*?)(?:\nAction Input:([\s\S]*?))?$/.exec(text);
if (!match) {
throw new OutputParserException(`Could not parse LLM output: ${text}`);
}
return {
tool: match[1].trim(),
toolInput: match[2]
? match[2].trim().replace(/^("+)(.*?)(\1)$/, "$2")
: "",
log: text,
};
}
/**
* Returns the format instructions for parsing the output of an agent
* action in the style of the ZeroShotAgent.
* @returns The format instructions for parsing the output.
*/
getFormatInstructions(): string {
return FORMAT_INSTRUCTIONS;
}
}
| |
144828
|
export type CreateStructuredChatAgentParams = {
/** LLM to use as the agent. */
llm: BaseLanguageModelInterface;
/** Tools this agent has access to. */
tools: (StructuredToolInterface | ToolDefinition)[];
/**
* The prompt to use. Must have input keys for
* `tools`, `tool_names`, and `agent_scratchpad`.
*/
prompt: BasePromptTemplate;
/**
* Whether to invoke the underlying model in streaming mode,
* allowing streaming of intermediate steps. Defaults to true.
*/
streamRunnable?: boolean;
};
/**
* Create an agent aimed at supporting tools with multiple inputs.
* @param params Params required to create the agent. Includes an LLM, tools, and prompt.
* @returns A runnable sequence representing an agent. It takes as input all the same input
* variables as the prompt passed in does. It returns as output either an
* AgentAction or AgentFinish.
*
* @example
* ```typescript
* import { AgentExecutor, createStructuredChatAgent } from "langchain/agents";
* import { pull } from "langchain/hub";
* import type { ChatPromptTemplate } from "@langchain/core/prompts";
* import { AIMessage, HumanMessage } from "@langchain/core/messages";
*
* import { ChatOpenAI } from "@langchain/openai";
*
* // Define the tools the agent will have access to.
* const tools = [...];
*
* // Get the prompt to use - you can modify this!
* // If you want to see the prompt in full, you can view it at:
* // https://smith.langchain.com/hub/hwchase17/structured-chat-agent
* const prompt = await pull<ChatPromptTemplate>(
* "hwchase17/structured-chat-agent"
* );
*
* const llm = new ChatOpenAI({
* temperature: 0,
* modelName: "gpt-3.5-turbo-1106",
* });
*
* const agent = await createStructuredChatAgent({
* llm,
* tools,
* prompt,
* });
*
* const agentExecutor = new AgentExecutor({
* agent,
* tools,
* });
*
* const result = await agentExecutor.invoke({
* input: "what is LangChain?",
* });
*
* // With chat history
* const result2 = await agentExecutor.invoke({
* input: "what's my name?",
* chat_history: [
* new HumanMessage("hi! my name is cob"),
* new AIMessage("Hello Cob! How can I assist you today?"),
* ],
* });
* ```
*/
export async function createStructuredChatAgent({
llm,
tools,
prompt,
streamRunnable,
}: CreateStructuredChatAgentParams) {
const missingVariables = ["tools", "tool_names", "agent_scratchpad"].filter(
(v) => !prompt.inputVariables.includes(v)
);
if (missingVariables.length > 0) {
throw new Error(
`Provided prompt is missing required input variables: ${JSON.stringify(
missingVariables
)}`
);
}
let toolNames: string[] = [];
if (tools.every(isOpenAITool)) {
toolNames = tools.map((tool) => tool.function.name);
} else if (tools.every(isStructuredTool)) {
toolNames = tools.map((tool) => tool.name);
} else {
throw new Error(
"All tools must be either OpenAI or Structured tools, not a mix."
);
}
const partialedPrompt = await prompt.partial({
tools: renderTextDescriptionAndArgs(tools),
tool_names: toolNames.join(", "),
});
// TODO: Add .bind to core runnable interface.
const llmWithStop = (llm as BaseLanguageModel).bind({
stop: ["Observation"],
});
const agent = AgentRunnableSequence.fromRunnables(
[
RunnablePassthrough.assign({
agent_scratchpad: (input: { steps: AgentStep[] }) =>
formatLogToString(input.steps),
}),
partialedPrompt,
llmWithStop,
StructuredChatOutputParserWithRetries.fromLLM(llm, {
toolNames,
}),
],
{
name: "StructuredChatAgent",
streamRunnable,
singleAction: true,
}
);
return agent;
}
| |
144829
|
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { Callbacks } from "@langchain/core/callbacks/manager";
import { AgentAction, AgentFinish } from "@langchain/core/agents";
import { OutputParserException } from "@langchain/core/output_parsers";
import { renderTemplate } from "@langchain/core/prompts";
import { AgentActionOutputParser } from "../types.js";
import {
AGENT_ACTION_FORMAT_INSTRUCTIONS,
FORMAT_INSTRUCTIONS,
} from "./prompt.js";
import { OutputFixingParser } from "../../output_parsers/fix.js";
/**
* A class that provides a custom implementation for parsing the output of
* a StructuredChatAgent action. It extends the `AgentActionOutputParser`
* class and extracts the action and action input from the text output,
* returning an `AgentAction` or `AgentFinish` object.
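*
* @example
* A minimal sketch of the fenced JSON shape this parser expects (the tool name is illustrative):
* ```typescript
* const parser = new StructuredChatOutputParser({ toolNames: ["calculator"] });
* const fence = "`".repeat(3);
* const action = await parser.parse(
*   `${fence}json\n{"action": "calculator", "action_input": {"expression": "2+2"}}\n${fence}`
* );
* // => { tool: "calculator", toolInput: { expression: "2+2" }, log: "..." }
* ```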
*/
export class StructuredChatOutputParser extends AgentActionOutputParser {
lc_namespace = ["langchain", "agents", "structured_chat"];
private toolNames: string[];
constructor(fields: { toolNames: string[] }) {
super(...arguments);
this.toolNames = fields.toolNames;
}
/**
* Parses the given text and returns an `AgentAction` or `AgentFinish`
* object. The action is expected to appear as JSON inside a markdown code
* block, with `action` and `action_input` keys.
* @param text The text to parse.
* @returns A Promise that resolves to an `AgentAction` or `AgentFinish` object.
*/
async parse(text: string): Promise<AgentAction | AgentFinish> {
try {
const regex = /```(?:json)?(.*)(```)/gs;
const actionMatch = regex.exec(text);
if (actionMatch === null) {
throw new OutputParserException(
`Could not parse an action. The agent action must be within a markdown code block, and "action" must be a provided tool or "Final Answer"`
);
}
const response = JSON.parse(actionMatch[1].trim());
const { action, action_input } = response;
if (action === "Final Answer") {
return { returnValues: { output: action_input }, log: text };
}
return { tool: action, toolInput: action_input || {}, log: text };
} catch (e) {
throw new OutputParserException(
`Failed to parse. Text: "${text}". Error: ${e}`
);
}
}
/**
* Returns the format instructions for parsing the output of an agent
* action in the style of the StructuredChatAgent.
* @returns A string representing the format instructions.
*/
getFormatInstructions(): string {
return renderTemplate(AGENT_ACTION_FORMAT_INSTRUCTIONS, "f-string", {
tool_names: this.toolNames.join(", "),
});
}
}
/**
* An interface for the arguments used to construct a
* `StructuredChatOutputParserWithRetries` instance.
*/
export interface StructuredChatOutputParserArgs {
baseParser?: StructuredChatOutputParser;
outputFixingParser?: OutputFixingParser<AgentAction | AgentFinish>;
toolNames?: string[];
}
/**
* A class that provides a wrapper around the `StructuredChatOutputParser`
* and `OutputFixingParser` classes. It extends the
* `AgentActionOutputParser` class and allows for retrying the output
* parsing using the `OutputFixingParser` if it is provided.
* @example
* ```typescript
* const outputParser = StructuredChatOutputParserWithRetries.fromLLM(
* new ChatOpenAI({ temperature: 0 }),
* {
* toolNames: ["calculator", "random-number-generator"],
* },
* );
* const result = await outputParser.parse(
* "What is a random number between 5 and 10 raised to the second power?"
* );
* ```
*/
export class StructuredChatOutputParserWithRetries extends AgentActionOutputParser {
lc_namespace = ["langchain", "agents", "structured_chat"];
private baseParser: StructuredChatOutputParser;
private outputFixingParser?: OutputFixingParser<AgentAction | AgentFinish>;
private toolNames: string[] = [];
constructor(fields: StructuredChatOutputParserArgs) {
super(fields);
this.toolNames = fields.toolNames ?? this.toolNames;
this.baseParser =
fields?.baseParser ??
new StructuredChatOutputParser({ toolNames: this.toolNames });
this.outputFixingParser = fields?.outputFixingParser;
}
/**
* Parses the given text and returns an `AgentAction` or `AgentFinish`
* object. Throws an `OutputParserException` if the parsing fails.
* @param text The text to parse.
* @returns A Promise that resolves to an `AgentAction` or `AgentFinish` object.
*/
async parse(
text: string,
callbacks?: Callbacks
): Promise<AgentAction | AgentFinish> {
if (this.outputFixingParser !== undefined) {
return this.outputFixingParser.parse(text, callbacks);
}
return this.baseParser.parse(text);
}
/**
* Returns the format instructions for parsing the output of an agent
* action in the style of the StructuredChatAgent.
* @returns A string representing the format instructions.
*/
getFormatInstructions(): string {
return renderTemplate(FORMAT_INSTRUCTIONS, "f-string", {
tool_names: this.toolNames.join(", "),
});
}
/**
* Creates a new `StructuredChatOutputParserWithRetries` instance from a
* `BaseLanguageModel` and options. The options can include a base parser
* and tool names.
* @param llm A `BaseLanguageModel` instance.
* @param options Options for creating a `StructuredChatOutputParserWithRetries` instance.
* @returns A new `StructuredChatOutputParserWithRetries` instance.
*/
static fromLLM(
llm: BaseLanguageModelInterface,
options: Omit<StructuredChatOutputParserArgs, "outputFixingParser">
): StructuredChatOutputParserWithRetries {
const baseParser =
options.baseParser ??
new StructuredChatOutputParser({ toolNames: options.toolNames ?? [] });
const outputFixingParser = OutputFixingParser.fromLLM(llm, baseParser);
return new StructuredChatOutputParserWithRetries({
baseParser,
outputFixingParser,
toolNames: options.toolNames,
});
}
}
| |
144833
|
/* eslint-disable no-process-env */
import { expect, test } from "@jest/globals";
import { OpenAI, OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { Tool } from "@langchain/core/tools";
import { RunnableSequence } from "@langchain/core/runnables";
import { OutputParserException } from "@langchain/core/output_parsers";
import { AIMessage } from "@langchain/core/messages";
import { AgentStep } from "@langchain/core/agents";
import { ChatMessageHistory } from "../../stores/message/in_memory.js";
import { AgentExecutor, ZeroShotAgent } from "../index.js";
import { SerpAPI } from "../../util/testing/tools/serpapi.js";
import { Calculator } from "../../util/testing/tools/calculator.js";
import { initializeAgentExecutorWithOptions } from "../initialize.js";
import { WebBrowser } from "../../tools/webbrowser.js";
import { BufferMemory } from "../../memory/buffer_memory.js";
test("Pass runnable to agent executor", async () => {
const model = new ChatOpenAI({ temperature: 0, modelName: "gpt-3.5-turbo" });
const tools: Tool[] = [
new SerpAPI(undefined, {
location: "Austin,Texas,United States",
hl: "en",
gl: "us",
}),
new Calculator(),
];
const prompt = ZeroShotAgent.createPrompt(tools);
const outputParser = ZeroShotAgent.getDefaultOutputParser();
const runnable = RunnableSequence.from([
{
input: (i: { input: string }) => i.input,
agent_scratchpad: (i: { input: string }) => i.input,
},
prompt,
model,
outputParser,
]);
const executor = AgentExecutor.fromAgentAndTools({
agent: runnable,
tools,
});
const res = await executor.invoke({
input:
"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?",
});
// console.log(
// {
// res,
// },
// "Pass runnable to agent executor"
// );
expect(res.output).not.toEqual("");
expect(res.output).not.toEqual("Agent stopped due to max iterations.");
});
test("Custom output parser", async () => {
const model = new ChatOpenAI({ temperature: 0, modelName: "gpt-3.5-turbo" });
const tools: Tool[] = [
new SerpAPI(undefined, {
location: "Austin,Texas,United States",
hl: "en",
gl: "us",
}),
new Calculator(),
];
const parser = (output: AIMessage) => {
const text = output.content;
if (typeof text !== "string") {
throw new Error("Cannot parse non-string output.");
}
if (text.includes("Final Answer:")) {
return {
returnValues: {
output: "We did it!",
},
log: text,
};
}
const match = /Action:([\s\S]*?)(?:\nAction Input:([\s\S]*?))?$/.exec(text);
if (!match) {
throw new OutputParserException(`Could not parse LLM output: ${text}`);
}
return {
tool: match[1].trim(),
toolInput: match[2]
? match[2].trim().replace(/^("+)(.*?)(\1)$/, "$2")
: "",
log: text,
};
};
const prompt = ZeroShotAgent.createPrompt(tools);
const runnable = RunnableSequence.from([
{
input: (i: { input: string }) => i.input,
agent_scratchpad: (i: { input: string }) => i.input,
},
prompt,
model,
parser,
]);
const executor = AgentExecutor.fromAgentAndTools({
agent: runnable,
tools,
});
const res = await executor.invoke({
input:
"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?",
});
// console.log(
// {
// res,
// },
// "Custom output parser"
// );
expect(res.output).toEqual("We did it!");
});
test("Add a fallback method", async () => {
// Model should always fail since the model name passed does not exist.
const modelBase = new ChatOpenAI({
modelName: "fake-model",
temperature: 10,
});
const modelLarge = new ChatOpenAI({
modelName: "gpt-3.5-turbo-16k",
temperature: 0.6,
});
const model = modelBase.withFallbacks({
fallbacks: [modelLarge],
});
const prompt = ZeroShotAgent.createPrompt([]);
const outputParser = ZeroShotAgent.getDefaultOutputParser();
const runnable = RunnableSequence.from([
{
input: (i: { input: string }) => i.input,
agent_scratchpad: (i: { input: string }) => i.input,
},
prompt,
model,
outputParser,
]);
const executor = AgentExecutor.fromAgentAndTools({
agent: runnable,
tools: [],
});
const res = await executor.invoke({
input: "Is the sky blue? Response with a concise answer",
});
// console.log(
// {
// res,
// },
// "Pass runnable to agent executor"
// );
expect(res.output).not.toEqual("");
expect(res.output).not.toEqual("Agent stopped due to max iterations.");
});
test("Run agent with an abort signal", async () => {
const model = new OpenAI({ temperature: 0, modelName: "text-babbage-001" });
const tools = [new Calculator()];
const executor = await initializeAgentExecutorWithOptions(tools, model, {
agentType: "zero-shot-react-description",
});
// console.log("Loaded agent.");
const input = `What is 3 to the fourth power?`;
// console.log(`Executing with input "${input}"...`);
const controller = new AbortController();
await expect(() => {
const result = executor.call({ input, signal: controller.signal });
controller.abort();
return result;
}).rejects.toThrow();
});
test("Run agent with incorrect api key should throw error", async () => {
const model = new OpenAI({
temperature: 0,
modelName: "text-babbage-001",
openAIApiKey: "invalid",
});
const tools = [
new SerpAPI(undefined, {
location: "Austin,Texas,United States",
hl: "en",
gl: "us",
}),
new Calculator(),
];
const executor = await initializeAgentExecutorWithOptions(tools, model, {
agentType: "zero-shot-react-description",
});
// console.log("Loaded agent.");
const input = `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`;
let error;
// Test that the model throws an error
await expect(async () => {
try {
await model.invoke(input);
} catch (e) {
error = e;
throw e;
}
}).rejects.toThrowError();
// Test that the agent throws the same error
await expect(() => executor.call({ input })).rejects.toThrowError(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).message
);
}, 10000);
test("Run tool web-browser", async () => {
const model = new OpenAI({ temperature: 0 });
const tools = [
new SerpAPI(process.env.SERPAPI_API_KEY, {
location: "Austin,Texas,United States",
hl: "en",
gl: "us",
}),
new Calculator(),
new WebBrowser({ model, embeddings: new OpenAIEmbeddings() }),
];
const executor = await initializeAgentExecutorWithOptions(tools, model, {
agentType: "zero-shot-react-description",
returnIntermediateSteps: true,
});
// console.log("Loaded agent.");
const input = `What is the word of the day on merriam webster`;
// console.log(`Executing with input "${input}"...`);
const result = await executor.call({ input });
// console.log(
// {
// result,
// },
// "Run tool web-browser"
// );
expect(result.intermediateSteps.length).toBeGreaterThanOrEqual(1);
expect(result.intermediateSteps[0].action.tool).toEqual("search");
expect(result.intermediateSteps[1].action.tool).toEqual("web-browser");
expect(result.output).not.toEqual("");
expect(result.output).not.toEqual("Agent stopped due to max iterations.");
});
| |
144843
|
import { zodToJsonSchema } from "zod-to-json-schema";
import fs from "fs";
import { z } from "zod";
import { AgentAction, AgentFinish, AgentStep } from "@langchain/core/agents";
import { AIMessage } from "@langchain/core/messages";
import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { convertToOpenAIFunction } from "@langchain/core/utils/function_calling";
import { RunnableSequence } from "@langchain/core/runnables";
import {
ChatPromptTemplate,
MessagesPlaceholder,
} from "@langchain/core/prompts";
import { createRetrieverTool } from "../toolkits/index.js";
import { RecursiveCharacterTextSplitter } from "../../text_splitter.js";
import { MemoryVectorStore } from "../../vectorstores/memory.js";
import { AgentExecutor } from "../executor.js";
import { formatForOpenAIFunctions } from "../format_scratchpad/openai_functions.js";
/** Define a custom structured output parser. */
const structuredOutputParser = (
output: AIMessage
): AgentAction | AgentFinish => {
if (typeof output.content !== "string") {
throw new Error("Cannot parse non-string output.");
}
if (output.additional_kwargs.function_call === undefined) {
return { returnValues: { output: output.content }, log: output.content };
}
const functionCall = output.additional_kwargs.function_call;
const name = functionCall?.name as string;
const inputs = functionCall?.arguments as string;
// console.log(functionCall);
const jsonInput = JSON.parse(inputs);
if (name === "response") {
return { returnValues: { ...jsonInput }, log: output.content };
}
return {
tool: name,
toolInput: jsonInput,
log: output.content,
};
};
test("Pass custom structured output parsers", async () => {
/** Read text file & embed documents */
const text = fs.readFileSync("../examples/state_of_the_union.txt", "utf8");
const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
let docs = await textSplitter.createDocuments([text]);
// Add fake source information
docs = docs.map((doc, i) => ({
...doc,
metadata: {
page_chunk: i,
},
}));
/** Initialize docs & create retriever */
const vectorStore = await MemoryVectorStore.fromDocuments(
docs,
new OpenAIEmbeddings()
);
const retriever = vectorStore.asRetriever();
/** Instantiate the LLM */
const llm = new ChatOpenAI({});
/** Define the prompt template */
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant"],
new MessagesPlaceholder("agent_scratchpad"),
["user", "{input}"],
]);
/** Define the response schema */
const responseSchema = z.object({
answer: z.string().describe("The final answer to respond to the user"),
sources: z
.array(z.string())
.describe(
"List of page chunks that contain answer to the question. Only include a page chunk if it contains relevant information"
),
});
/** Create the response function */
const responseOpenAIFunction = {
name: "response",
description: "Return the response to the user",
parameters: zodToJsonSchema(responseSchema),
};
/** Convert retriever into a tool */
const retrieverTool = createRetrieverTool(retriever, {
name: "state-of-union-retriever",
description:
"Query a retriever to get information about state of the union address",
});
/** Bind both retriever and response functions to LLM */
const llmWithTools = llm.bind({
functions: [convertToOpenAIFunction(retrieverTool), responseOpenAIFunction],
});
/** Create the runnable */
const runnableAgent = RunnableSequence.from([
{
input: (i: { input: string; steps: Array<AgentStep> }) => i.input,
agent_scratchpad: (i: { input: string; steps: Array<AgentStep> }) =>
formatForOpenAIFunctions(i.steps),
},
prompt,
llmWithTools,
structuredOutputParser,
]);
/** Create the agent by passing in the runnable & tools */
const executor = AgentExecutor.fromAgentAndTools({
agent: runnableAgent,
tools: [retrieverTool],
});
/** Call invoke on the agent */
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await executor.invoke({
input: "what did the president say about kentaji brown jackson",
});
// console.log({
// res,
// });
/**
{
res: {
answer: 'President mentioned that he nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. He described her as one of our nation’s top legal minds and stated that she will continue Justice Breyer’s legacy of excellence.',
sources: [
'And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.'
]
}
}
*/
});
| |
144858
|
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { type ToolInterface, BaseToolkit } from "@langchain/core/tools";
import { renderTemplate } from "@langchain/core/prompts";
import {
InfoSqlTool,
ListTablesSqlTool,
QueryCheckerTool,
QuerySqlTool,
} from "../../../tools/sql.js";
import { SQL_PREFIX, SQL_SUFFIX } from "./prompt.js";
import { LLMChain } from "../../../chains/llm_chain.js";
import { ZeroShotAgent, ZeroShotCreatePromptArgs } from "../../mrkl/index.js";
import { AgentExecutor } from "../../executor.js";
import { SqlDatabase } from "../../../sql_db.js";
/**
* Interface that extends ZeroShotCreatePromptArgs and adds an optional
* topK parameter for specifying the number of results to return.
*/
export interface SqlCreatePromptArgs extends ZeroShotCreatePromptArgs {
/** Number of results to return. */
topK?: number;
}
/**
* Class that represents a toolkit for working with SQL databases. It
* initializes SQL tools based on the provided SQL database.
* @example
* ```typescript
* const model = new ChatOpenAI({});
* const toolkit = new SqlToolkit(sqlDb, model);
* const executor = createSqlAgent(model, toolkit);
 * const result = await executor.invoke({ input: "List the total sales per country. Which country's customers spent the most?" });
* console.log(`Got output ${result.output}`);
* ```
*/
export class SqlToolkit extends BaseToolkit {
tools: ToolInterface[];
db: SqlDatabase;
dialect = "sqlite";
constructor(db: SqlDatabase, llm?: BaseLanguageModelInterface) {
super();
this.db = db;
this.tools = [
new QuerySqlTool(db),
new InfoSqlTool(db),
new ListTablesSqlTool(db),
new QueryCheckerTool({ llm }),
];
}
}
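/**
 * Constructs a `ZeroShotAgent`-based executor that answers questions by
 * querying a SQL database through the tools in a `SqlToolkit`.
 * @example
 * ```typescript
 * // Minimal usage sketch (assumes an existing `SqlDatabase` instance named `sqlDb`):
 * const model = new ChatOpenAI({ temperature: 0 });
 * const toolkit = new SqlToolkit(sqlDb, model);
 * const executor = createSqlAgent(model, toolkit, { topK: 5 });
 * const result = await executor.invoke({
 *   input: "How many employees are there?",
 * });
 * console.log(result.output);
 * ```
 */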
export function createSqlAgent(
llm: BaseLanguageModelInterface,
toolkit: SqlToolkit,
args?: SqlCreatePromptArgs
) {
const {
prefix = SQL_PREFIX,
suffix = SQL_SUFFIX,
inputVariables = ["input", "agent_scratchpad"],
topK = 10,
} = args ?? {};
const { tools } = toolkit;
const formattedPrefix = renderTemplate(prefix, "f-string", {
dialect: toolkit.dialect,
top_k: topK,
});
const prompt = ZeroShotAgent.createPrompt(tools, {
prefix: formattedPrefix,
suffix,
inputVariables,
});
const chain = new LLMChain({ prompt, llm });
const agent = new ZeroShotAgent({
llmChain: chain,
allowedTools: tools.map((t) => t.name),
});
return AgentExecutor.fromAgentAndTools({
agent,
tools,
returnIntermediateSteps: true,
});
}
| |
144862
|
import { AgentAction, AgentFinish } from "@langchain/core/agents";
import { OutputParserException } from "@langchain/core/output_parsers";
import { AgentActionOutputParser } from "../types.js";
/**
* @example
* ```typescript
* const prompt = ChatPromptTemplate.fromMessages([
* HumanMessagePromptTemplate.fromTemplate(AGENT_INSTRUCTIONS),
* new MessagesPlaceholder("agent_scratchpad"),
* ]);
* const runnableAgent = RunnableSequence.from([
* ...rest of runnable
* prompt,
* new ChatAnthropic({ modelName: "claude-2", temperature: 0 }).bind({
* stop: ["</tool_input>", "</final_answer>"],
* }),
* new XMLAgentOutputParser(),
* ]);
* const result = await executor.invoke({
* input: "What is the weather in Honolulu?",
* tools: [],
* });
* ```
*/
export class XMLAgentOutputParser extends AgentActionOutputParser {
lc_namespace = ["langchain", "agents", "xml"];
static lc_name() {
return "XMLAgentOutputParser";
}
/**
* Parses the output text from the agent and returns an AgentAction or
* AgentFinish object.
* @param text The output text from the agent.
* @returns An AgentAction or AgentFinish object.
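   * @example
   * ```typescript
   * // Sketch of the two formats this parser understands:
   * const parser = new XMLAgentOutputParser();
   * await parser.parse("<tool>search</tool><tool_input>weather in Honolulu");
   * // => { tool: "search", toolInput: "weather in Honolulu", log: "..." }
   * await parser.parse("<final_answer>It is sunny in Honolulu.");
   * // => { returnValues: { output: "It is sunny in Honolulu." }, log: "..." }
   * ```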
*/
async parse(text: string): Promise<AgentAction | AgentFinish> {
if (text.includes("</tool>")) {
const [tool, toolInput] = text.split("</tool>");
const _tool = tool.split("<tool>")[1];
const _toolInput = toolInput.split("<tool_input>")[1];
return { tool: _tool, toolInput: _toolInput, log: text };
} else if (text.includes("<final_answer>")) {
const [, answer] = text.split("<final_answer>");
return { returnValues: { output: answer }, log: text };
} else {
throw new OutputParserException(`Could not parse LLM output: ${text}`);
}
}
getFormatInstructions(): string {
throw new Error(
"getFormatInstructions not implemented inside OpenAIFunctionsAgentOutputParser."
);
}
}
| |
144872
|
import type { ToolInterface } from "@langchain/core/tools";
import { BasePromptTemplate } from "@langchain/core/prompts";
import type {
BaseLanguageModel,
BaseLanguageModelInterface,
} from "@langchain/core/language_models/base";
import { RunnablePassthrough } from "@langchain/core/runnables";
import { AgentStep } from "@langchain/core/agents";
import { renderTextDescription } from "../../tools/render.js";
import { formatLogToString } from "../format_scratchpad/log.js";
import { ReActSingleInputOutputParser } from "./output_parser.js";
import { AgentRunnableSequence } from "../agent.js";
/**
 * Params used by the createReactAgent function.
*/
export type CreateReactAgentParams = {
/** LLM to use for the agent. */
llm: BaseLanguageModelInterface;
/** Tools this agent has access to. */
tools: ToolInterface[];
/**
* The prompt to use. Must have input keys for
* `tools`, `tool_names`, and `agent_scratchpad`.
*/
prompt: BasePromptTemplate;
/**
* Whether to invoke the underlying model in streaming mode,
* allowing streaming of intermediate steps. Defaults to true.
*/
streamRunnable?: boolean;
};
/**
* Create an agent that uses ReAct prompting.
* @param params Params required to create the agent. Includes an LLM, tools, and prompt.
* @returns A runnable sequence representing an agent. It takes as input all the same input
* variables as the prompt passed in does. It returns as output either an
* AgentAction or AgentFinish.
*
* @example
* ```typescript
* import { AgentExecutor, createReactAgent } from "langchain/agents";
* import { pull } from "langchain/hub";
* import type { PromptTemplate } from "@langchain/core/prompts";
*
* import { OpenAI } from "@langchain/openai";
*
* // Define the tools the agent will have access to.
* const tools = [...];
*
* // Get the prompt to use - you can modify this!
* // If you want to see the prompt in full, you can at:
* // https://smith.langchain.com/hub/hwchase17/react
* const prompt = await pull<PromptTemplate>("hwchase17/react");
*
* const llm = new OpenAI({
* temperature: 0,
* });
*
* const agent = await createReactAgent({
* llm,
* tools,
* prompt,
* });
*
* const agentExecutor = new AgentExecutor({
* agent,
* tools,
* });
*
* const result = await agentExecutor.invoke({
* input: "what is LangChain?",
* });
* ```
*/
export async function createReactAgent({
llm,
tools,
prompt,
streamRunnable,
}: CreateReactAgentParams) {
const missingVariables = ["tools", "tool_names", "agent_scratchpad"].filter(
(v) => !prompt.inputVariables.includes(v)
);
if (missingVariables.length > 0) {
throw new Error(
`Provided prompt is missing required input variables: ${JSON.stringify(
missingVariables
)}`
);
}
const toolNames = tools.map((tool) => tool.name);
const partialedPrompt = await prompt.partial({
tools: renderTextDescription(tools),
tool_names: toolNames.join(", "),
});
// TODO: Add .bind to core runnable interface.
const llmWithStop = (llm as BaseLanguageModel).bind({
stop: ["\nObservation:"],
});
const agent = AgentRunnableSequence.fromRunnables(
[
RunnablePassthrough.assign({
agent_scratchpad: (input: { steps: AgentStep[] }) =>
formatLogToString(input.steps),
}),
partialedPrompt,
llmWithStop,
new ReActSingleInputOutputParser({
toolNames,
}),
],
{
name: "ReactAgent",
streamRunnable,
singleAction: true,
}
);
return agent;
}
| |
144942
|
export {
BaseMessagePromptTemplate,
type MessagesPlaceholderFields as MessagePlaceholderFields,
MessagesPlaceholder,
type MessageStringPromptTemplateFields,
BaseMessageStringPromptTemplate,
BaseChatPromptTemplate,
type ChatMessagePromptTemplateFields,
ChatMessagePromptTemplate,
HumanMessagePromptTemplate,
AIMessagePromptTemplate,
SystemMessagePromptTemplate,
type ChatPromptTemplateInput,
type BaseMessagePromptTemplateLike,
ChatPromptTemplate,
} from "@langchain/core/prompts";
export {
type ChatPromptValueFields,
ChatPromptValue,
} from "@langchain/core/prompt_values";
| |
144981
|
import * as url from "node:url";
import * as path from "node:path";
import { test, expect } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { JSONLoader } from "../fs/json.js";
test("Test JSON loader", async () => {
const filePath = path.resolve(
path.dirname(url.fileURLToPath(import.meta.url)),
"./example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.json"
);
const loader = new JSONLoader(filePath);
const docs = await loader.load();
expect(docs.length).toBe(32);
expect(docs[0]).toEqual(
new Document({
metadata: { source: filePath, line: 1 },
pageContent:
"<i>Corruption discovered at the core of the Banking Clan!</i>",
})
);
});
test("Test JSON loader for complex json without keys", async () => {
const filePath = path.resolve(
path.dirname(url.fileURLToPath(import.meta.url)),
"./example_data/complex.json"
);
const loader = new JSONLoader(filePath);
const docs = await loader.load();
expect(docs.length).toBe(10);
expect(docs[0]).toEqual(
new Document({
metadata: { source: filePath, line: 1 },
pageContent: "BD 2023 SUMMER",
})
);
expect(docs[1]).toEqual(
new Document({
metadata: { source: filePath, line: 2 },
pageContent: "LinkedIn Job",
})
);
expect(docs[2]).toEqual(
new Document({
metadata: { source: filePath, line: 3 },
pageContent: "IMPORTANT",
})
);
});
test("Test JSON loader for complex json with one key that points nothing", async () => {
const filePath = path.resolve(
path.dirname(url.fileURLToPath(import.meta.url)),
"./example_data/complex.json"
);
const loader = new JSONLoader(filePath, ["/plop"]);
const docs = await loader.load();
expect(docs.length).toBe(0);
});
test("Test JSON loader for complex json with one key that exists", async () => {
const filePath = path.resolve(
path.dirname(url.fileURLToPath(import.meta.url)),
"./example_data/complex.json"
);
const loader = new JSONLoader(filePath, ["/from"]);
const docs = await loader.load();
expect(docs.length).toBe(2);
expect(docs[1]).toEqual(
new Document({
metadata: { source: filePath, line: 2 },
pageContent: "LinkedIn Job2",
})
);
});
test("Test JSON loader for complex json with two keys that exists", async () => {
const filePath = path.resolve(
path.dirname(url.fileURLToPath(import.meta.url)),
"./example_data/complex.json"
);
const loader = new JSONLoader(filePath, ["/from", "/labels"]);
const docs = await loader.load();
expect(docs.length).toBe(6);
expect(docs[3]).toEqual(
new Document({
metadata: { source: filePath, line: 4 },
pageContent: "INBOX",
})
);
});
test("Test JSON loader for complex json with two existing keys on different level", async () => {
const filePath = path.resolve(
path.dirname(url.fileURLToPath(import.meta.url)),
"./example_data/complex.json"
);
const loader = new JSONLoader(filePath, ["/from", "/surname"]);
const docs = await loader.load();
expect(docs.length).toBe(3);
expect(docs[2]).toEqual(
new Document({
metadata: { source: filePath, line: 3 },
pageContent: "bob",
})
);
});
| |
144984
|
import * as url from "node:url";
import * as path from "node:path";
import * as fs from "node:fs/promises";
import { test, expect } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { JSONLoader } from "../fs/json.js";
test("Test JSON loader from blob", async () => {
const filePath = path.resolve(
path.dirname(url.fileURLToPath(import.meta.url)),
"./example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.json"
);
const loader = new JSONLoader(
new Blob([await fs.readFile(filePath)], { type: "application/json" })
);
const docs = await loader.load();
expect(docs.length).toBe(32);
expect(docs[0]).toEqual(
new Document({
metadata: { source: "blob", blobType: "application/json", line: 1 },
pageContent:
"<i>Corruption discovered at the core of the Banking Clan!</i>",
})
);
});
test("Test JSON loader from blob", async () => {
const loader = new JSONLoader(
new Blob(
[
`{
"texts": ["This is a sentence.", "This is another sentence."]
}`,
],
{ type: "application/json" }
)
);
const docs = await loader.load();
expect(docs.length).toBe(2);
expect(docs[0]).toMatchInlineSnapshot(`
Document {
"id": undefined,
"metadata": {
"blobType": "application/json",
"line": 1,
"source": "blob",
},
"pageContent": "This is a sentence.",
}
`);
expect(docs[1]).toMatchInlineSnapshot(`
Document {
"id": undefined,
"metadata": {
"blobType": "application/json",
"line": 2,
"source": "blob",
},
"pageContent": "This is another sentence.",
}
`);
});
test("Test JSON loader from blob", async () => {
const loader = new JSONLoader(
new Blob(
[
`{
"1": {
"body": "BD 2023 SUMMER",
"from": "LinkedIn Job",
"labels": ["IMPORTANT", "CATEGORY_UPDATES", "INBOX"]
},
"2": {
"body": "Intern, Treasury and other roles are available",
"from": "LinkedIn Job2",
"labels": ["IMPORTANT"],
"other": {
"name": "plop",
"surname": "bob"
}
}
}`,
],
{ type: "application/json" }
)
);
const docs = await loader.load();
expect(docs.length).toBe(10);
expect(docs[0]).toMatchInlineSnapshot(`
Document {
"id": undefined,
"metadata": {
"blobType": "application/json",
"line": 1,
"source": "blob",
},
"pageContent": "BD 2023 SUMMER",
}
`);
expect(docs[1]).toMatchInlineSnapshot(`
Document {
"id": undefined,
"metadata": {
"blobType": "application/json",
"line": 2,
"source": "blob",
},
"pageContent": "LinkedIn Job",
}
`);
});
| |
144991
|
import type { readFile as ReadFileT } from "node:fs/promises";
import { Document } from "@langchain/core/documents";
import { getEnv } from "@langchain/core/utils/env";
import { BaseDocumentLoader } from "../base.js";
/**
* A class that extends the `BaseDocumentLoader` class. It represents a
* document loader that loads documents from a text file. The `load()`
* method is implemented to read the text from the file or blob, parse it
* using the `parse()` method, and create a `Document` instance for each
* parsed page. The metadata includes the source of the text (file path or
* blob) and, if there are multiple pages, the line number of each page.
* @example
* ```typescript
* const loader = new TextLoader("src/document_loaders/example_data/example.txt");
* const docs = await loader.load();
* ```
*/
export class TextLoader extends BaseDocumentLoader {
constructor(public filePathOrBlob: string | Blob) {
super();
}
/**
* A protected method that takes a `raw` string as a parameter and returns
* a promise that resolves to an array containing the raw text as a single
* element.
* @param raw The raw text to be parsed.
* @returns A promise that resolves to an array containing the raw text as a single element.
*/
protected async parse(raw: string): Promise<string[]> {
return [raw];
}
/**
* A method that loads the text file or blob and returns a promise that
* resolves to an array of `Document` instances. It reads the text from
* the file or blob using the `readFile` function from the
* `node:fs/promises` module or the `text()` method of the blob. It then
* parses the text using the `parse()` method and creates a `Document`
* instance for each parsed page. The metadata includes the source of the
* text (file path or blob) and, if there are multiple pages, the line
* number of each page.
* @returns A promise that resolves to an array of `Document` instances.
*/
public async load(): Promise<Document[]> {
let text: string;
let metadata: Record<string, string>;
if (typeof this.filePathOrBlob === "string") {
const { readFile } = await TextLoader.imports();
text = await readFile(this.filePathOrBlob, "utf8");
metadata = { source: this.filePathOrBlob };
} else {
text = await this.filePathOrBlob.text();
metadata = { source: "blob", blobType: this.filePathOrBlob.type };
}
const parsed = await this.parse(text);
parsed.forEach((pageContent, i) => {
if (typeof pageContent !== "string") {
throw new Error(
        `Expected string at position ${i}, got ${typeof pageContent}`
);
}
});
return parsed.map(
(pageContent, i) =>
new Document({
pageContent,
metadata:
parsed.length === 1
? metadata
: {
...metadata,
line: i + 1,
},
})
);
}
/**
* A static method that imports the `readFile` function from the
* `node:fs/promises` module. It is used to dynamically import the
* function when needed. If the import fails, it throws an error
* indicating that the `fs/promises` module is not available in the
* current environment.
* @returns A promise that resolves to an object containing the `readFile` function from the `node:fs/promises` module.
*/
static async imports(): Promise<{
readFile: typeof ReadFileT;
}> {
try {
const { readFile } = await import("node:fs/promises");
return { readFile };
} catch (e) {
console.error(e);
throw new Error(
        `Failed to load fs/promises. TextLoader is only available in the 'node' environment. It appears you are running environment '${getEnv()}'. See https://<link to docs> for alternatives.`
);
}
}
}
| |
144996
|
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import type { VectorStoreInterface } from "@langchain/core/vectorstores";
import { ChainValues } from "@langchain/core/utils/types";
import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
import { PromptTemplate } from "@langchain/core/prompts";
import { SerializedChatVectorDBQAChain } from "./serde.js";
import { BaseChain, ChainInputs } from "./base.js";
import { LLMChain } from "./llm_chain.js";
import { loadQAStuffChain } from "./question_answering/load.js";
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type LoadValues = Record<string, any>;
const question_generator_template = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:`;
const qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:`;
/**
* Interface for the input parameters of the ChatVectorDBQAChain class.
*/
export interface ChatVectorDBQAChainInput extends ChainInputs {
vectorstore: VectorStoreInterface;
combineDocumentsChain: BaseChain;
questionGeneratorChain: LLMChain;
returnSourceDocuments?: boolean;
outputKey?: string;
inputKey?: string;
k?: number;
}
/** @deprecated use `ConversationalRetrievalQAChain` instead. */
export class ChatVectorDBQAChain
extends BaseChain
implements ChatVectorDBQAChainInput
{
k = 4;
inputKey = "question";
chatHistoryKey = "chat_history";
get inputKeys() {
return [this.inputKey, this.chatHistoryKey];
}
outputKey = "result";
get outputKeys() {
return [this.outputKey];
}
vectorstore: VectorStoreInterface;
combineDocumentsChain: BaseChain;
questionGeneratorChain: LLMChain;
returnSourceDocuments = false;
constructor(fields: ChatVectorDBQAChainInput) {
super(fields);
this.vectorstore = fields.vectorstore;
this.combineDocumentsChain = fields.combineDocumentsChain;
this.questionGeneratorChain = fields.questionGeneratorChain;
this.inputKey = fields.inputKey ?? this.inputKey;
this.outputKey = fields.outputKey ?? this.outputKey;
this.k = fields.k ?? this.k;
this.returnSourceDocuments =
fields.returnSourceDocuments ?? this.returnSourceDocuments;
}
/** @ignore */
async _call(
values: ChainValues,
runManager?: CallbackManagerForChainRun
): Promise<ChainValues> {
if (!(this.inputKey in values)) {
throw new Error(`Question key ${this.inputKey} not found.`);
}
if (!(this.chatHistoryKey in values)) {
      throw new Error(`Chat history key ${this.chatHistoryKey} not found.`);
}
const question: string = values[this.inputKey];
const chatHistory: string = values[this.chatHistoryKey];
let newQuestion = question;
if (chatHistory.length > 0) {
const result = await this.questionGeneratorChain.call(
{
question,
chat_history: chatHistory,
},
runManager?.getChild("question_generator")
);
const keys = Object.keys(result);
console.log("_call", values, keys);
if (keys.length === 1) {
newQuestion = result[keys[0]];
} else {
throw new Error(
"Return from llm chain has multiple values, only single values supported."
);
}
}
const docs = await this.vectorstore.similaritySearch(
newQuestion,
this.k,
undefined,
runManager?.getChild("vectorstore")
);
const inputs = {
question: newQuestion,
input_documents: docs,
chat_history: chatHistory,
};
const result = await this.combineDocumentsChain.call(
inputs,
runManager?.getChild("combine_documents")
);
if (this.returnSourceDocuments) {
return {
...result,
sourceDocuments: docs,
};
}
return result;
}
_chainType() {
return "chat-vector-db" as const;
}
static async deserialize(
data: SerializedChatVectorDBQAChain,
values: LoadValues
) {
if (!("vectorstore" in values)) {
throw new Error(
        `Need to pass in a vectorstore to deserialize ChatVectorDBQAChain`
);
}
const { vectorstore } = values;
return new ChatVectorDBQAChain({
combineDocumentsChain: await BaseChain.deserialize(
data.combine_documents_chain
),
questionGeneratorChain: await LLMChain.deserialize(
data.question_generator
),
k: data.k,
vectorstore,
});
}
serialize(): SerializedChatVectorDBQAChain {
return {
_type: this._chainType(),
combine_documents_chain: this.combineDocumentsChain.serialize(),
question_generator: this.questionGeneratorChain.serialize(),
k: this.k,
};
}
/**
* Creates an instance of ChatVectorDBQAChain using a BaseLanguageModel
* and other options.
* @param llm Instance of BaseLanguageModel used to generate a new question.
* @param vectorstore Instance of VectorStore used for vector operations.
* @param options (Optional) Additional options for creating the ChatVectorDBQAChain instance.
* @returns New instance of ChatVectorDBQAChain.
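   * @example
   * ```typescript
   * // Minimal sketch (assumes an existing vector store; prefer
   * // `ConversationalRetrievalQAChain` for new code since this class is deprecated):
   * const chain = ChatVectorDBQAChain.fromLLM(new OpenAI({ temperature: 0 }), vectorStore);
   * const res = await chain.call({ question: "What is up?", chat_history: "" });
   * ```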
*/
static fromLLM(
llm: BaseLanguageModelInterface,
vectorstore: VectorStoreInterface,
options: {
inputKey?: string;
outputKey?: string;
k?: number;
returnSourceDocuments?: boolean;
questionGeneratorTemplate?: string;
qaTemplate?: string;
verbose?: boolean;
} = {}
): ChatVectorDBQAChain {
const { questionGeneratorTemplate, qaTemplate, verbose, ...rest } = options;
const question_generator_prompt = PromptTemplate.fromTemplate(
questionGeneratorTemplate || question_generator_template
);
const qa_prompt = PromptTemplate.fromTemplate(qaTemplate || qa_template);
const qaChain = loadQAStuffChain(llm, { prompt: qa_prompt, verbose });
const questionGeneratorChain = new LLMChain({
prompt: question_generator_prompt,
llm,
verbose,
});
const instance = new this({
vectorstore,
combineDocumentsChain: qaChain,
questionGeneratorChain,
...rest,
});
return instance;
}
}
| |
145008
|
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import type { BaseRetrieverInterface } from "@langchain/core/retrievers";
import { ChainValues } from "@langchain/core/utils/types";
import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
import { BaseChain, ChainInputs } from "./base.js";
import { SerializedVectorDBQAChain } from "./serde.js";
import {
StuffQAChainParams,
loadQAStuffChain,
} from "./question_answering/load.js";
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type LoadValues = Record<string, any>;
/**
* Interface for the input parameters of the RetrievalQAChain class.
*/
export interface RetrievalQAChainInput extends Omit<ChainInputs, "memory"> {
retriever: BaseRetrieverInterface;
combineDocumentsChain: BaseChain;
inputKey?: string;
returnSourceDocuments?: boolean;
}
/**
* @deprecated This class will be removed in 1.0.0. See below for an example implementation using
* `createRetrievalChain`:
* Class representing a chain for performing question-answering tasks with
* a retrieval component.
* @example
* ```typescript
* import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
* import { ChatPromptTemplate } from "@langchain/core/prompts";
* import { createRetrievalChain } from "langchain/chains/retrieval";
* import { MemoryVectorStore } from "langchain/vectorstores/memory";
*
* const documents = [...your documents here];
* const embeddings = ...your embeddings model;
* const llm = ...your LLM model;
*
* const vectorstore = await MemoryVectorStore.fromDocuments(
* documents,
* embeddings
* );
* const prompt = ChatPromptTemplate.fromTemplate(`Answer the user's question: {input} based on the following context {context}`);
*
* const combineDocsChain = await createStuffDocumentsChain({
* llm,
* prompt,
* });
* const retriever = vectorstore.asRetriever();
*
* const retrievalChain = await createRetrievalChain({
* combineDocsChain,
* retriever,
* });
* ```
*/
export class RetrievalQAChain
extends BaseChain
implements RetrievalQAChainInput
{
static lc_name() {
return "RetrievalQAChain";
}
inputKey = "query";
get inputKeys() {
return [this.inputKey];
}
get outputKeys() {
return this.combineDocumentsChain.outputKeys.concat(
this.returnSourceDocuments ? ["sourceDocuments"] : []
);
}
retriever: BaseRetrieverInterface;
combineDocumentsChain: BaseChain;
returnSourceDocuments = false;
constructor(fields: RetrievalQAChainInput) {
super(fields);
this.retriever = fields.retriever;
this.combineDocumentsChain = fields.combineDocumentsChain;
this.inputKey = fields.inputKey ?? this.inputKey;
this.returnSourceDocuments =
fields.returnSourceDocuments ?? this.returnSourceDocuments;
}
/** @ignore */
async _call(
values: ChainValues,
runManager?: CallbackManagerForChainRun
): Promise<ChainValues> {
if (!(this.inputKey in values)) {
throw new Error(`Question key "${this.inputKey}" not found.`);
}
const question: string = values[this.inputKey];
const docs = await this.retriever.getRelevantDocuments(
question,
runManager?.getChild("retriever")
);
const inputs = { question, input_documents: docs, ...values };
const result = await this.combineDocumentsChain.call(
inputs,
runManager?.getChild("combine_documents")
);
if (this.returnSourceDocuments) {
return {
...result,
sourceDocuments: docs,
};
}
return result;
}
_chainType() {
return "retrieval_qa" as const;
}
static async deserialize(
_data: SerializedVectorDBQAChain,
_values: LoadValues
): Promise<RetrievalQAChain> {
throw new Error("Not implemented");
}
serialize(): SerializedVectorDBQAChain {
throw new Error("Not implemented");
}
/**
* Creates a new instance of RetrievalQAChain using a BaseLanguageModel
* and a BaseRetriever.
* @param llm The BaseLanguageModel used to generate a new question.
* @param retriever The BaseRetriever used to retrieve relevant documents.
* @param options Optional parameters for the RetrievalQAChain.
* @returns A new instance of RetrievalQAChain.
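   * @example
   * ```typescript
   * // Minimal sketch (assumes an existing vector store; adjust to your setup):
   * const chain = RetrievalQAChain.fromLLM(
   *   new ChatOpenAI({ temperature: 0 }),
   *   vectorStore.asRetriever(),
   *   { returnSourceDocuments: true }
   * );
   * const res = await chain.call({
   *   query: "What did the speaker say about Justice Breyer?",
   * });
   * ```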
*/
static fromLLM(
llm: BaseLanguageModelInterface,
retriever: BaseRetrieverInterface,
options?: Partial<
Omit<
RetrievalQAChainInput,
"retriever" | "combineDocumentsChain" | "index"
>
> &
StuffQAChainParams
): RetrievalQAChain {
const qaChain = loadQAStuffChain(llm, {
prompt: options?.prompt,
});
return new this({
...options,
retriever,
combineDocumentsChain: qaChain,
});
}
}
| |
145009
|
import { PromptTemplate } from "@langchain/core/prompts";
import { LLMChain, LLMChainInput } from "./llm_chain.js";
import { BufferMemory } from "../memory/buffer_memory.js";
import { Optional } from "../types/type-utils.js";
export const DEFAULT_TEMPLATE = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Human: {input}
AI:`;
/**
* A class for conducting conversations between a human and an AI. It
* extends the {@link LLMChain} class.
* @example
* ```typescript
* const model = new ChatOpenAI({});
* const chain = new ConversationChain({ llm: model });
*
* // Sending a greeting to the conversation chain
* const res1 = await chain.call({ input: "Hi! I'm Jim." });
* console.log({ res1 });
*
* // Following up with a question in the conversation
* const res2 = await chain.call({ input: "What's my name?" });
* console.log({ res2 });
* ```
*/
export class ConversationChain extends LLMChain {
static lc_name() {
return "ConversationChain";
}
constructor({
prompt,
outputKey,
memory,
...rest
}: Optional<LLMChainInput, "prompt">) {
super({
prompt:
prompt ??
new PromptTemplate({
template: DEFAULT_TEMPLATE,
inputVariables: ["history", "input"],
}),
outputKey: outputKey ?? "response",
memory: memory ?? new BufferMemory(),
...rest,
});
}
}
| |
145010
|
import {
BaseLanguageModel,
BaseLanguageModelInterface,
BaseLanguageModelInput,
} from "@langchain/core/language_models/base";
import type { ChainValues } from "@langchain/core/utils/types";
import type { Generation } from "@langchain/core/outputs";
import type { BaseMessage } from "@langchain/core/messages";
import type { BasePromptValueInterface } from "@langchain/core/prompt_values";
import { BasePromptTemplate } from "@langchain/core/prompts";
import {
BaseLLMOutputParser,
BaseOutputParser,
} from "@langchain/core/output_parsers";
import {
CallbackManager,
BaseCallbackConfig,
CallbackManagerForChainRun,
Callbacks,
} from "@langchain/core/callbacks/manager";
import { Runnable, type RunnableInterface } from "@langchain/core/runnables";
import { BaseChain, ChainInputs } from "./base.js";
import { SerializedLLMChain } from "./serde.js";
import { NoOpOutputParser } from "../output_parsers/noop.js";
type LLMType =
| BaseLanguageModelInterface
| Runnable<BaseLanguageModelInput, string>
| Runnable<BaseLanguageModelInput, BaseMessage>;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type CallOptionsIfAvailable<T> = T extends { CallOptions: infer CO } ? CO : any;
/**
* Interface for the input parameters of the LLMChain class.
*/
export interface LLMChainInput<
T extends string | object = string,
Model extends LLMType = LLMType
> extends ChainInputs {
/** Prompt object to use */
prompt: BasePromptTemplate;
/** LLM Wrapper to use */
llm: Model;
/** Kwargs to pass to LLM */
llmKwargs?: CallOptionsIfAvailable<Model>;
/** OutputParser to use */
outputParser?: BaseLLMOutputParser<T>;
/** Key to use for output, defaults to `text` */
outputKey?: string;
}
function isBaseLanguageModel(llmLike: unknown): llmLike is BaseLanguageModel {
return typeof (llmLike as BaseLanguageModelInterface)._llmType === "function";
}
function _getLanguageModel(llmLike: RunnableInterface): BaseLanguageModel {
if (isBaseLanguageModel(llmLike)) {
return llmLike;
} else if ("bound" in llmLike && Runnable.isRunnable(llmLike.bound)) {
return _getLanguageModel(llmLike.bound);
} else if (
"runnable" in llmLike &&
"fallbacks" in llmLike &&
Runnable.isRunnable(llmLike.runnable)
) {
return _getLanguageModel(llmLike.runnable);
} else if ("default" in llmLike && Runnable.isRunnable(llmLike.default)) {
return _getLanguageModel(llmLike.default);
} else {
throw new Error("Unable to extract BaseLanguageModel from llmLike object.");
}
}
/**
* @deprecated This class will be removed in 1.0.0. Use the LangChain Expression Language (LCEL) instead.
* See the example below for how to use LCEL with the LLMChain class:
*
* Chain to run queries against LLMs.
*
* @example
* ```ts
* import { ChatPromptTemplate } from "@langchain/core/prompts";
* import { ChatOpenAI } from "@langchain/openai";
*
* const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke");
* const llm = new ChatOpenAI();
* const chain = prompt.pipe(llm);
*
* const response = await chain.invoke({ adjective: "funny" });
* ```
*/
export class LLMChain<
T extends string | object = string,
Model extends LLMType = LLMType
>
extends BaseChain
implements LLMChainInput<T>
{
static lc_name() {
return "LLMChain";
}
lc_serializable = true;
prompt: BasePromptTemplate;
llm: Model;
llmKwargs?: CallOptionsIfAvailable<Model>;
outputKey = "text";
outputParser?: BaseLLMOutputParser<T>;
get inputKeys() {
return this.prompt.inputVariables;
}
get outputKeys() {
return [this.outputKey];
}
constructor(fields: LLMChainInput<T, Model>) {
super(fields);
this.prompt = fields.prompt;
this.llm = fields.llm;
this.llmKwargs = fields.llmKwargs;
this.outputKey = fields.outputKey ?? this.outputKey;
this.outputParser =
fields.outputParser ?? (new NoOpOutputParser() as BaseOutputParser<T>);
if (this.prompt.outputParser) {
if (fields.outputParser) {
throw new Error("Cannot set both outputParser and prompt.outputParser");
}
this.outputParser = this.prompt.outputParser as BaseOutputParser<T>;
}
}
private getCallKeys(): string[] {
const callKeys = "callKeys" in this.llm ? this.llm.callKeys : [];
return callKeys;
}
/** @ignore */
_selectMemoryInputs(values: ChainValues): ChainValues {
const valuesForMemory = super._selectMemoryInputs(values);
const callKeys = this.getCallKeys();
for (const key of callKeys) {
if (key in values) {
delete valuesForMemory[key];
}
}
return valuesForMemory;
}
/** @ignore */
async _getFinalOutput(
generations: Generation[],
promptValue: BasePromptValueInterface,
runManager?: CallbackManagerForChainRun
): Promise<unknown> {
let finalCompletion: unknown;
if (this.outputParser) {
finalCompletion = await this.outputParser.parseResultWithPrompt(
generations,
promptValue,
runManager?.getChild()
);
} else {
finalCompletion = generations[0].text;
}
return finalCompletion;
}
/**
* Run the core logic of this chain and add to output if desired.
*
* Wraps _call and handles memory.
*/
call(
values: ChainValues & CallOptionsIfAvailable<Model>,
config?: Callbacks | BaseCallbackConfig
): Promise<ChainValues> {
return super.call(values, config);
}
/** @ignore */
async _call(
values: ChainValues & CallOptionsIfAvailable<Model>,
runManager?: CallbackManagerForChainRun
): Promise<ChainValues> {
const valuesForPrompt = { ...values };
const valuesForLLM = {
...this.llmKwargs,
} as CallOptionsIfAvailable<Model>;
const callKeys = this.getCallKeys();
for (const key of callKeys) {
if (key in values) {
if (valuesForLLM) {
valuesForLLM[key as keyof CallOptionsIfAvailable<Model>] =
values[key];
delete valuesForPrompt[key];
}
}
}
const promptValue = await this.prompt.formatPromptValue(valuesForPrompt);
if ("generatePrompt" in this.llm) {
const { generations } = await this.llm.generatePrompt(
[promptValue],
valuesForLLM,
runManager?.getChild()
);
return {
[this.outputKey]: await this._getFinalOutput(
generations[0],
promptValue,
runManager
),
};
}
const modelWithParser = this.outputParser
? this.llm.pipe(this.outputParser)
: this.llm;
const response = await modelWithParser.invoke(
promptValue,
runManager?.getChild()
);
return {
[this.outputKey]: response,
};
}
/**
* Format prompt with values and pass to LLM
*
* @param values - keys to pass to prompt template
* @param callbackManager - CallbackManager to use
* @returns Completion from LLM.
*
* @example
* ```ts
* llm.predict({ adjective: "funny" })
* ```
*/
async predict(
values: ChainValues & CallOptionsIfAvailable<Model>,
callbackManager?: CallbackManager
): Promise<T> {
const output = await this.call(values, callbackManager);
return output[this.outputKey];
}
_chainType() {
return "llm" as const;
}
static async deserialize(data: SerializedLLMChain) {
const { llm, prompt } = data;
if (!llm) {
throw new Error("LLMChain must have llm");
}
if (!prompt) {
throw new Error("LLMChain must have prompt");
}
return new LLMChain({
llm: await BaseLanguageModel.deserialize(llm),
prompt: await BasePromptTemplate.deserialize(prompt),
});
}
/** @deprecated */
serialize(): SerializedLLMChain {
const serialize =
"serialize" in this.llm ? this.llm.serialize() : undefined;
return {
_type: `${this._chainType()}_chain`,
llm: serialize,
prompt: this.prompt.serialize(),
};
}
_getNumTokens(text: string): Promise<number> {
return _getLanguageModel(this.llm).getNumTokens(text);
}
}
| |
145012
|
export class ConversationalRetrievalQAChain
extends BaseChain
implements ConversationalRetrievalQAChainInput
{
static lc_name() {
return "ConversationalRetrievalQAChain";
}
inputKey = "question";
chatHistoryKey = "chat_history";
get inputKeys() {
return [this.inputKey, this.chatHistoryKey];
}
get outputKeys() {
return this.combineDocumentsChain.outputKeys.concat(
this.returnSourceDocuments ? ["sourceDocuments"] : []
);
}
retriever: BaseRetrieverInterface;
combineDocumentsChain: BaseChain;
questionGeneratorChain: LLMChain;
returnSourceDocuments = false;
returnGeneratedQuestion = false;
constructor(fields: ConversationalRetrievalQAChainInput) {
super(fields);
this.retriever = fields.retriever;
this.combineDocumentsChain = fields.combineDocumentsChain;
this.questionGeneratorChain = fields.questionGeneratorChain;
this.inputKey = fields.inputKey ?? this.inputKey;
this.returnSourceDocuments =
fields.returnSourceDocuments ?? this.returnSourceDocuments;
this.returnGeneratedQuestion =
fields.returnGeneratedQuestion ?? this.returnGeneratedQuestion;
}
/**
* Static method to convert the chat history input into a formatted
* string.
* @param chatHistory Chat history input which can be a string, an array of BaseMessage instances, or an array of string arrays.
* @returns A formatted string representing the chat history.
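   * @example
   * ```typescript
   * const history = ConversationalRetrievalQAChain.getChatHistoryString([
   *   new HumanMessage("Hi, I'm Jim."),
   *   new AIMessage("Hello Jim, how can I help?"),
   * ]);
   * // "Human: Hi, I'm Jim.\nAssistant: Hello Jim, how can I help?"
   * ```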
*/
static getChatHistoryString(
chatHistory: string | BaseMessage[] | string[][]
) {
let historyMessages: BaseMessage[];
if (Array.isArray(chatHistory)) {
// TODO: Deprecate on a breaking release
if (
Array.isArray(chatHistory[0]) &&
typeof chatHistory[0][0] === "string"
) {
console.warn(
"Passing chat history as an array of strings is deprecated.\nPlease see https://js.langchain.com/docs/modules/chains/popular/chat_vector_db#externally-managed-memory for more information."
);
historyMessages = chatHistory.flat().map((stringMessage, i) => {
if (i % 2 === 0) {
return new HumanMessage(stringMessage);
} else {
return new AIMessage(stringMessage);
}
});
} else {
historyMessages = chatHistory as BaseMessage[];
}
return historyMessages
.map((chatMessage) => {
if (chatMessage._getType() === "human") {
return `Human: ${chatMessage.content}`;
} else if (chatMessage._getType() === "ai") {
return `Assistant: ${chatMessage.content}`;
} else {
return `${chatMessage.content}`;
}
})
.join("\n");
}
return chatHistory;
}
/** @ignore */
async _call(
values: ChainValues,
runManager?: CallbackManagerForChainRun
): Promise<ChainValues> {
if (!(this.inputKey in values)) {
throw new Error(`Question key ${this.inputKey} not found.`);
}
if (!(this.chatHistoryKey in values)) {
throw new Error(`Chat history key ${this.chatHistoryKey} not found.`);
}
const question: string = values[this.inputKey];
const chatHistory: string =
ConversationalRetrievalQAChain.getChatHistoryString(
values[this.chatHistoryKey]
);
let newQuestion = question;
if (chatHistory.length > 0) {
const result = await this.questionGeneratorChain.call(
{
question,
chat_history: chatHistory,
},
runManager?.getChild("question_generator")
);
const keys = Object.keys(result);
if (keys.length === 1) {
newQuestion = result[keys[0]];
} else {
throw new Error(
"Return from llm chain has multiple values, only single values supported."
);
}
}
const docs = await this.retriever.getRelevantDocuments(
newQuestion,
runManager?.getChild("retriever")
);
const inputs = {
question: newQuestion,
input_documents: docs,
chat_history: chatHistory,
};
let result = await this.combineDocumentsChain.call(
inputs,
runManager?.getChild("combine_documents")
);
if (this.returnSourceDocuments) {
result = {
...result,
sourceDocuments: docs,
};
}
if (this.returnGeneratedQuestion) {
result = {
...result,
generatedQuestion: newQuestion,
};
}
return result;
}
_chainType(): string {
return "conversational_retrieval_chain";
}
static async deserialize(
_data: SerializedChatVectorDBQAChain,
_values: LoadValues
): Promise<ConversationalRetrievalQAChain> {
throw new Error("Not implemented.");
}
serialize(): SerializedChatVectorDBQAChain {
throw new Error("Not implemented.");
}
/**
* Static method to create a new ConversationalRetrievalQAChain from a
* BaseLanguageModel and a BaseRetriever.
* @param llm {@link BaseLanguageModelInterface} instance used to generate a new question.
* @param retriever {@link BaseRetrieverInterface} instance used to retrieve relevant documents.
* @param options.returnSourceDocuments Whether to return source documents in the final output
* @param options.questionGeneratorChainOptions Options to initialize the standalone question generation chain used as the first internal step
* @param options.qaChainOptions {@link QAChainParams} used to initialize the QA chain used as the second internal step
* @returns A new instance of ConversationalRetrievalQAChain.
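   * @example
   * ```typescript
   * // Minimal sketch (assumes an existing retriever; adjust model and options to your setup):
   * const chain = ConversationalRetrievalQAChain.fromLLM(
   *   new ChatOpenAI({ temperature: 0 }),
   *   vectorStore.asRetriever(),
   *   { returnSourceDocuments: true }
   * );
   * const res = await chain.call({
   *   question: "What did the president say about Justice Breyer?",
   *   chat_history: "",
   * });
   * ```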
*/
static fromLLM(
llm: BaseLanguageModelInterface,
retriever: BaseRetrieverInterface,
options: {
outputKey?: string; // not used
returnSourceDocuments?: boolean;
/** @deprecated Pass in questionGeneratorChainOptions.template instead */
questionGeneratorTemplate?: string;
/** @deprecated Pass in qaChainOptions.prompt instead */
qaTemplate?: string;
questionGeneratorChainOptions?: {
llm?: BaseLanguageModelInterface;
template?: string;
};
qaChainOptions?: QAChainParams;
} & Omit<
ConversationalRetrievalQAChainInput,
"retriever" | "combineDocumentsChain" | "questionGeneratorChain"
> = {}
): ConversationalRetrievalQAChain {
const {
questionGeneratorTemplate,
qaTemplate,
qaChainOptions = {
type: "stuff",
prompt: qaTemplate
? PromptTemplate.fromTemplate(qaTemplate)
: undefined,
},
questionGeneratorChainOptions,
verbose,
...rest
} = options;
const qaChain = loadQAChain(llm, qaChainOptions);
const questionGeneratorChainPrompt = PromptTemplate.fromTemplate(
questionGeneratorChainOptions?.template ??
questionGeneratorTemplate ??
question_generator_template
);
const questionGeneratorChain = new LLMChain({
prompt: questionGeneratorChainPrompt,
llm: questionGeneratorChainOptions?.llm ?? llm,
verbose,
});
const instance = new this({
retriever,
combineDocumentsChain: qaChain,
questionGeneratorChain,
verbose,
...rest,
});
return instance;
}
}
| |
145018
|
/* eslint-disable spaced-comment */
import {
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
PromptTemplate,
} from "@langchain/core/prompts";
import {
ConditionalPromptSelector,
isChatModel,
} from "@langchain/core/example_selectors";
export const DEFAULT_QA_PROMPT = /*#__PURE__*/ new PromptTemplate({
template:
"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n{context}\n\nQuestion: {question}\nHelpful Answer:",
inputVariables: ["context", "question"],
});
const system_template = `Use the following pieces of context to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
{context}`;
const messages = [
/*#__PURE__*/ SystemMessagePromptTemplate.fromTemplate(system_template),
/*#__PURE__*/ HumanMessagePromptTemplate.fromTemplate("{question}"),
];
const CHAT_PROMPT = /*#__PURE__*/ ChatPromptTemplate.fromMessages(messages);
export const QA_PROMPT_SELECTOR = /*#__PURE__*/ new ConditionalPromptSelector(
DEFAULT_QA_PROMPT,
[[isChatModel, CHAT_PROMPT]]
);
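// Usage sketch (assuming a model instance `llm`): `QA_PROMPT_SELECTOR.getPrompt(llm)`
// resolves to CHAT_PROMPT for chat models and to DEFAULT_QA_PROMPT otherwise.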
| |
145019
|
import { test } from "@jest/globals";
import { OpenAI } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import {
loadQAMapReduceChain,
loadQARefineChain,
loadQAStuffChain,
} from "../load.js";
test("Test loadQAStuffChain", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const chain = loadQAStuffChain(model);
const docs = [
new Document({ pageContent: "foo" }),
new Document({ pageContent: "bar" }),
new Document({ pageContent: "baz" }),
];
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.call({ input_documents: docs, question: "Whats up" });
// console.log({ res });
});
test("Test loadQAMapReduceChain", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const chain = loadQAMapReduceChain(model);
const docs = [
new Document({ pageContent: "foo" }),
new Document({ pageContent: "bar" }),
new Document({ pageContent: "baz" }),
];
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.call({ input_documents: docs, question: "Whats up" });
// console.log({ res });
});
test("Test loadQARefineChain", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const chain = loadQARefineChain(model);
const docs = [
new Document({ pageContent: "Harrison went to Harvard." }),
new Document({ pageContent: "Ankush went to Princeton." }),
];
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.call({
input_documents: docs,
question: "Where did Harrison go to college?",
});
// console.log({ res });
});
| |
145048
|
import { test } from "@jest/globals";
import { OpenAI, ChatOpenAI } from "@langchain/openai";
import {
ChatPromptTemplate,
HumanMessagePromptTemplate,
PromptTemplate,
} from "@langchain/core/prompts";
import { LLMChain } from "../llm_chain.js";
import { BufferMemory } from "../../memory/buffer_memory.js";
test("Test OpenAI", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const prompt = new PromptTemplate({
template: "Print {foo}",
inputVariables: ["foo"],
});
const chain = new LLMChain({ prompt, llm: model });
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.call({ foo: "my favorite color" });
// console.log({ res });
});
test("Test OpenAI with timeout", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const prompt = new PromptTemplate({
template: "Print {foo}",
inputVariables: ["foo"],
});
const chain = new LLMChain({ prompt, llm: model });
await expect(() =>
chain.call({
foo: "my favorite color",
timeout: 10,
})
).rejects.toThrow();
});
test("Test run method", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const prompt = new PromptTemplate({
template: "Print {foo}",
inputVariables: ["foo"],
});
const chain = new LLMChain({ prompt, llm: model });
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.run("my favorite color");
// console.log({ res });
});
test("Test run method", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const prompt = new PromptTemplate({
template: "{history} Print {foo}",
inputVariables: ["foo", "history"],
});
const chain = new LLMChain({
prompt,
llm: model,
memory: new BufferMemory(),
});
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.run("my favorite color");
// console.log({ res });
});
test("Test memory + cancellation", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const prompt = new PromptTemplate({
template: "{history} Print {foo}",
inputVariables: ["foo", "history"],
});
const chain = new LLMChain({
prompt,
llm: model,
memory: new BufferMemory(),
});
await expect(() =>
chain.call({
foo: "my favorite color",
signal: AbortSignal.timeout(20),
})
).rejects.toThrow();
});
test("Test memory + timeout", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const prompt = new PromptTemplate({
template: "{history} Print {foo}",
inputVariables: ["foo", "history"],
});
const chain = new LLMChain({
prompt,
llm: model,
memory: new BufferMemory(),
});
await expect(() =>
chain.call({
foo: "my favorite color",
timeout: 20,
})
).rejects.toThrow();
});
test("Test apply", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const prompt = new PromptTemplate({
template: "Print {foo}",
inputVariables: ["foo"],
});
const chain = new LLMChain({ prompt, llm: model });
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.apply([{ foo: "my favorite color" }]);
// console.log({ res });
});
test("Test LLMChain with ChatOpenAI", async () => {
const model = new ChatOpenAI({ temperature: 0.9 });
const template = "What is a good name for a company that makes {product}?";
const prompt = new PromptTemplate({ template, inputVariables: ["product"] });
const humanMessagePrompt = new HumanMessagePromptTemplate(prompt);
const chatPromptTemplate = ChatPromptTemplate.fromMessages([
humanMessagePrompt,
]);
const chatChain = new LLMChain({ llm: model, prompt: chatPromptTemplate });
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chatChain.call({ product: "colorful socks" });
// console.log({ res });
});
test("Test passing a runnable to an LLMChain", async () => {
const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106" });
const runnableModel = model.bind({
response_format: {
type: "json_object",
},
});
const prompt = PromptTemplate.fromTemplate(
"You are a bee --I mean a spelling bee. Respond with a JSON key of 'spelling':\nQuestion:{input}"
);
const chain = new LLMChain({ llm: runnableModel, prompt });
const response = await chain.invoke({ input: "How do you spell today?" });
expect(JSON.parse(response.text)).toMatchObject({
spelling: expect.any(String),
});
});
| |
145049
|
import { test, expect } from "@jest/globals";
import { Document } from "@langchain/core/documents";
import { BaseLLM } from "@langchain/core/language_models/llms";
import { LLMResult } from "@langchain/core/outputs";
import { loadQAMapReduceChain } from "../question_answering/load.js";
import { loadSummarizationChain } from "../index.js";
class FakeLLM extends BaseLLM {
nrMapCalls = 0;
nrReduceCalls = 0;
_llmType(): string {
return "fake";
}
async _generate(prompts: string[]): Promise<LLMResult> {
return {
generations: prompts.map((prompt) => {
let completion = "";
if (prompt.startsWith("Use the following portion")) {
this.nrMapCalls += 1;
completion = "a portion of context";
} else if (prompt.startsWith("Given the following extracted")) {
this.nrReduceCalls += 1;
completion = "a final answer";
}
return [
{
text: completion,
score: 0,
},
];
}),
};
}
}
test("Test MapReduceDocumentsChain", async () => {
const model = new FakeLLM({});
const chain = loadQAMapReduceChain(model);
const docs = [
new Document({ pageContent: "harrison went to harvard" }),
new Document({ pageContent: "ankush went to princeton" }),
];
const res = await chain.invoke({
input_documents: docs,
question: "Where did harrison go to college",
});
// console.log({ res });
expect(res).toEqual({
text: "a final answer",
});
expect(model.nrMapCalls).toBe(0); // below maxTokens
expect(model.nrReduceCalls).toBe(1);
});
test("Test MapReduceDocumentsChain with content above maxTokens and intermediate steps", async () => {
const model = new FakeLLM({});
const chain = loadQAMapReduceChain(model, {
returnIntermediateSteps: true,
});
const aString = "a".repeat(4000);
const bString = "b".repeat(4000);
const docs = [
new Document({ pageContent: aString }),
new Document({ pageContent: bString }),
];
const res = await chain.invoke({
input_documents: docs,
question: "Is the letter c present in the document",
});
// console.log({ res });
expect(res).toEqual({
text: "a final answer",
intermediateSteps: ["a portion of context", "a portion of context"],
});
expect(model.nrMapCalls).toBe(2); // above maxTokens
expect(model.nrReduceCalls).toBe(1);
});
test("Test RefineDocumentsChain", async () => {
const model = new FakeLLM({});
const chain = loadSummarizationChain(model, { type: "refine" });
const docs = [
new Document({ pageContent: "harrison went to harvard" }),
new Document({ pageContent: "ankush went to princeton" }),
];
expect(chain.inputKeys).toEqual(["input_documents"]);
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.run(docs);
// console.log({ res });
});
| |
145051
|
import { test } from "@jest/globals";
import { OpenAI } from "@langchain/openai";
import { ConversationChain } from "../conversation.js";
test("Test ConversationChain", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const chain = new ConversationChain({ llm: model });
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.call({ input: "my favorite color" });
// console.log({ res });
});
| |
145053
|
import { expect, test } from "@jest/globals";
import { OpenAI, OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { ConversationalRetrievalQAChain } from "../conversational_retrieval_chain.js";
import { MemoryVectorStore } from "../../vectorstores/memory.js";
import { BufferMemory } from "../../memory/buffer_memory.js";
test("Test ConversationalRetrievalQAChain from LLM", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const vectorStore = await MemoryVectorStore.fromTexts(
["Hello world", "Bye bye", "hello nice world", "bye", "hi"],
[{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
new OpenAIEmbeddings()
);
const chain = ConversationalRetrievalQAChain.fromLLM(
model,
vectorStore.asRetriever()
);
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.call({ question: "foo", chat_history: "bar" });
// console.log({ res });
});
test("Test ConversationalRetrievalQAChain from LLM with flag option to return source", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const vectorStore = await MemoryVectorStore.fromTexts(
["Hello world", "Bye bye", "hello nice world", "bye", "hi"],
[{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
new OpenAIEmbeddings()
);
const chain = ConversationalRetrievalQAChain.fromLLM(
model,
vectorStore.asRetriever(),
{
returnSourceDocuments: true,
}
);
const res = await chain.call({ question: "foo", chat_history: "bar" });
expect(res).toEqual(
expect.objectContaining({
text: expect.any(String),
sourceDocuments: expect.arrayContaining([
expect.objectContaining({
metadata: expect.objectContaining({
id: expect.any(Number),
}),
pageContent: expect.any(String),
}),
]),
})
);
});
test("Test ConversationalRetrievalQAChain from LLM with flag option to return source and memory set", async () => {
const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const vectorStore = await MemoryVectorStore.fromTexts(
["Hello world", "Bye bye", "hello nice world", "bye", "hi"],
[{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
new OpenAIEmbeddings()
);
const chain = ConversationalRetrievalQAChain.fromLLM(
model,
vectorStore.asRetriever(),
{
returnSourceDocuments: true,
memory: new BufferMemory({
memoryKey: "chat_history",
inputKey: "question",
outputKey: "text",
}),
}
);
const res = await chain.call({ question: "foo", chat_history: "bar" });
expect(res).toEqual(
expect.objectContaining({
text: expect.any(String),
sourceDocuments: expect.arrayContaining([
expect.objectContaining({
metadata: expect.objectContaining({
id: expect.any(Number),
}),
pageContent: expect.any(String),
}),
]),
})
);
});
test("Test ConversationalRetrievalQAChain from LLM with override default prompts", async () => {
const model = new OpenAI({
modelName: "gpt-3.5-turbo-instruct",
temperature: 0,
});
const vectorStore = await MemoryVectorStore.fromTexts(
["Hello world", "Bye bye", "hello nice world", "bye", "hi"],
[{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
new OpenAIEmbeddings()
);
const qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say "Sorry I dont know, I am learning from Aliens", don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:`;
const chain = ConversationalRetrievalQAChain.fromLLM(
model,
vectorStore.asRetriever(),
{
qaTemplate: qa_template,
}
);
const res = await chain.call({
question: "What is better programming Language Python or Javascript ",
chat_history: "bar",
});
expect(res.text).toContain("I am learning from Aliens");
// console.log({ res });
});
test("Test ConversationalRetrievalQAChain from LLM with a chat model", async () => {
const model = new ChatOpenAI({
modelName: "gpt-3.5-turbo",
temperature: 0,
});
const vectorStore = await MemoryVectorStore.fromTexts(
["Hello world", "Bye bye", "hello nice world", "bye", "hi"],
[{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
new OpenAIEmbeddings()
);
const qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say "Sorry I dont know, I am learning from Aliens", don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:`;
const chain = ConversationalRetrievalQAChain.fromLLM(
model,
vectorStore.asRetriever(),
{
qaChainOptions: {
type: "stuff",
prompt: PromptTemplate.fromTemplate(qa_template),
},
}
);
const res = await chain.call({
question: "What is better programming Language Python or Javascript ",
chat_history: "bar",
});
expect(res.text).toContain("I am learning from Aliens");
// console.log({ res });
});
test("Test ConversationalRetrievalQAChain from LLM with a map reduce chain", async () => {
const model = new ChatOpenAI({
modelName: "gpt-3.5-turbo",
temperature: 0,
});
const vectorStore = await MemoryVectorStore.fromTexts(
["Hello world", "Bye bye", "hello nice world", "bye", "hi"],
[{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
new OpenAIEmbeddings()
);
const chain = ConversationalRetrievalQAChain.fromLLM(
model,
vectorStore.asRetriever(),
{
qaChainOptions: {
type: "map_reduce",
},
}
);
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.call({
question: "What is better programming Language Python or Javascript ",
chat_history: "bar",
});
// console.log({ res });
});
test("Test ConversationalRetrievalQAChain from LLM without memory", async () => {
const model = new OpenAI({
temperature: 0,
});
const vectorStore = await MemoryVectorStore.fromTexts(
[
"Mitochondria are the powerhouse of the cell",
"Foo is red",
"Bar is red",
"Buildings are made out of brick",
"Mitochondria are made of lipids",
],
[{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
new OpenAIEmbeddings()
);
const chain = ConversationalRetrievalQAChain.fromLLM(
model,
vectorStore.asRetriever()
);
const question = "What is the powerhouse of the cell?";
const res = await chain.call({
question,
chat_history: "",
});
// console.log({ res });
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res2 = await chain.call({
question: "What are they made out of?",
chat_history: question + res.text,
});
// console.log({ res2 });
});
| |
145054
|
test("Test ConversationalRetrievalQAChain from LLM with a chat model without memory", async () => {
const model = new ChatOpenAI({
modelName: "gpt-3.5-turbo",
temperature: 0,
});
const vectorStore = await MemoryVectorStore.fromTexts(
[
"Mitochondria are the powerhouse of the cell",
"Foo is red",
"Bar is red",
"Buildings are made out of brick",
"Mitochondria are made of lipids",
],
[{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
new OpenAIEmbeddings()
);
const chain = ConversationalRetrievalQAChain.fromLLM(
model,
vectorStore.asRetriever()
);
const question = "What is the powerhouse of the cell?";
const res = await chain.call({
question,
chat_history: "",
});
// console.log({ res });
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res2 = await chain.call({
question: "What are they made out of?",
chat_history: question + res.text,
});
// console.log({ res2 });
});
test("Test ConversationalRetrievalQAChain from LLM with memory", async () => {
const model = new OpenAI({
temperature: 0,
});
const vectorStore = await MemoryVectorStore.fromTexts(
[
"Mitochondria are the powerhouse of the cell",
"Foo is red",
"Bar is red",
"Buildings are made out of brick",
"Mitochondria are made of lipids",
],
[{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
new OpenAIEmbeddings()
);
const chain = ConversationalRetrievalQAChain.fromLLM(
model,
vectorStore.asRetriever(),
{
memory: new BufferMemory({
memoryKey: "chat_history",
}),
}
);
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.call({
question: "What is the powerhouse of the cell?",
});
// console.log({ res });
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res2 = await chain.call({
question: "What are they made out of?",
});
// console.log({ res2 });
});
test("Test ConversationalRetrievalQAChain from LLM with a chat model and memory", async () => {
const model = new ChatOpenAI({
modelName: "gpt-3.5-turbo",
temperature: 0,
});
const vectorStore = await MemoryVectorStore.fromTexts(
[
"Mitochondria are the powerhouse of the cell",
"Foo is red",
"Bar is red",
"Buildings are made out of brick",
"Mitochondria are made of lipids",
],
[{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
new OpenAIEmbeddings()
);
const chain = ConversationalRetrievalQAChain.fromLLM(
model,
vectorStore.asRetriever(),
{
memory: new BufferMemory({
memoryKey: "chat_history",
returnMessages: true,
}),
}
);
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res = await chain.call({
question: "What is the powerhouse of the cell?",
});
// console.log({ res });
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res2 = await chain.call({
question: "What are they made out of?",
});
// console.log({ res2 });
});
test("Test ConversationalRetrievalQAChain from LLM with deprecated history syntax", async () => {
const model = new OpenAI({
temperature: 0,
});
const vectorStore = await MemoryVectorStore.fromTexts(
[
"Mitochondria are the powerhouse of the cell",
"Foo is red",
"Bar is red",
"Buildings are made out of brick",
"Mitochondria are made of lipids",
],
[{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }],
new OpenAIEmbeddings()
);
const chain = ConversationalRetrievalQAChain.fromLLM(
model,
vectorStore.asRetriever()
);
const question = "What is the powerhouse of the cell?";
const res = await chain.call({
question,
chat_history: [],
});
// console.log({ res });
// @eslint-disable-next-line/@typescript-eslint/ban-ts-comment
// @ts-expect-error unused var
const res2 = await chain.call({
question: "What are they made out of?",
chat_history: [[question, res.text]],
});
// console.log({ res2 });
});
| |
145072
|
/* eslint-disable spaced-comment */
import { PromptTemplate } from "@langchain/core/prompts";
export const API_URL_RAW_PROMPT_TEMPLATE = `You are given the below API Documentation:
{api_docs}
Using this documentation, generate the full API url to call for answering the user question.
You should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.
Question:{question}
API url:`;
export const API_URL_PROMPT_TEMPLATE = /* #__PURE__ */ new PromptTemplate({
inputVariables: ["api_docs", "question"],
template: API_URL_RAW_PROMPT_TEMPLATE,
});
export const API_RESPONSE_RAW_PROMPT_TEMPLATE = `${API_URL_RAW_PROMPT_TEMPLATE} {api_url}
Here is the response from the API:
{api_response}
Summarize this response to answer the original question.
Summary:`;
export const API_RESPONSE_PROMPT_TEMPLATE = /* #__PURE__ */ new PromptTemplate({
inputVariables: ["api_docs", "question", "api_url", "api_response"],
template: API_RESPONSE_RAW_PROMPT_TEMPLATE,
});
| |
145086
|
/* eslint-disable spaced-comment */
import { BasePromptTemplate, PromptTemplate } from "@langchain/core/prompts";
export const DEFAULT_SQL_DATABASE_PROMPT = /*#__PURE__*/ new PromptTemplate({
template: `Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the few relevant columns given the question.
Pay attention to use only the column names that you can see in the schema description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
Only use the tables listed below.
{table_info}
Question: {input}`,
inputVariables: ["dialect", "table_info", "input", "top_k"],
});
export const SQL_POSTGRES_PROMPT = /*#__PURE__*/ new PromptTemplate({
template: `You are a PostgreSQL expert. Given an input question, first create a syntactically correct PostgreSQL query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per PostgreSQL. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
Only use the following tables:
{table_info}
Question: {input}`,
inputVariables: ["dialect", "table_info", "input", "top_k"],
});
export const SQL_SQLITE_PROMPT = /*#__PURE__*/ new PromptTemplate({
template: `You are a SQLite expert. Given an input question, first create a syntactically correct SQLite query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per SQLite. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
Only use the following tables:
{table_info}
Question: {input}`,
inputVariables: ["dialect", "table_info", "input", "top_k"],
});
export const SQL_MYSQL_PROMPT = /*#__PURE__*/ new PromptTemplate({
template: `You are a MySQL expert. Given an input question, first create a syntactically correct MySQL query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per MySQL. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in backticks (\`) to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
Only use the following tables:
{table_info}
Question: {input}`,
inputVariables: ["dialect", "table_info", "input", "top_k"],
});
export const SQL_MSSQL_PROMPT = /*#__PURE__*/ new PromptTemplate({
template: `You are an MS SQL expert. Given an input question, first create a syntactically correct MS SQL query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the TOP clause as per MS SQL. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in square brackets ([]) to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
Only use the following tables:
{table_info}
Question: {input}`,
inputVariables: ["dialect", "table_info", "input", "top_k"],
});
export const SQL_SAP_HANA_PROMPT = /*#__PURE__*/ new PromptTemplate({
template: `You are a SAP HANA expert. Given an input question, first create a syntactically correct SAP HANA query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per SAP HANA. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Always use a schema name when executing a query.
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
Only use the following tables:
{table_info}
Question: {input}`,
inputVariables: ["dialect", "table_info", "input", "top_k"],
});
export const SQL_ORACLE_PROMPT = /*#__PURE__*/ new PromptTemplate({
template: `You are an ORACLE expert. Given an input question, first create a syntactically correct ORACLE query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the ROWNUM clause as per ORACLE. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
Only use the following tables:
{table_info}
Question: {input}`,
inputVariables: ["dialect", "table_info", "input", "top_k"],
});
export type SqlDialect =
| "oracle"
| "postgres"
| "sqlite"
| "mysql"
| "mssql"
| "sap hana";
export const SQL_PROMPTS_MAP: Record<SqlDialect, BasePromptTemplate> = {
oracle: SQL_ORACLE_PROMPT,
postgres: SQL_POSTGRES_PROMPT,
sqlite: SQL_SQLITE_PROMPT,
mysql: SQL_MYSQL_PROMPT,
mssql: SQL_MSSQL_PROMPT,
"sap hana": SQL_SAP_HANA_PROMPT,
};
| |
145123
|
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
// import all entrypoints to test, do not do this in your own app
import "../../entrypoints.js";
// Import a few things we'll use to test the exports
import { LLMChain } from "langchain/chains";
import { ChatOpenAI } from "@langchain/openai";
import {
ChatPromptTemplate,
HumanMessagePromptTemplate,
} from "@langchain/core/prompts";
import { OpenAI } from "@langchain/openai";
import { OpenAIEmbeddings } from "@langchain/openai";
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { ChatAgent } from "langchain/agents";
import { NextRequest, NextResponse } from "next/server";
export const config = {
runtime: "edge",
};
export default async function handler(req: NextRequest) {
// Instantiate a few things to test the exports
new OpenAI({ openAIApiKey: process.env.OPENAI_API_KEY });
const emb = new OpenAIEmbeddings({
openAIApiKey: process.env.OPENAI_API_KEY,
});
const agent = ChatAgent.fromLLMAndTools(new ChatOpenAI(), []);
// Set up a streaming LLM
const encoder = new TextEncoder();
const stream = new TransformStream();
const writer = stream.writable.getWriter();
const llm = new ChatOpenAI({
streaming: true,
callbackManager: CallbackManager.fromHandlers({
handleLLMNewToken: async (token) => {
await writer.ready;
await writer.write(encoder.encode(`data: ${token}\n\n`));
},
handleLLMEnd: async () => {
await writer.ready;
await writer.close();
},
handleLLMError: async (e) => {
await writer.ready;
await writer.abort(e);
},
}),
});
// Test a chain + prompt + model
const chain = new LLMChain({
llm,
prompt: ChatPromptTemplate.fromMessages([
HumanMessagePromptTemplate.fromTemplate("{input}"),
]),
});
// Run the chain but don't await it, otherwise the response will start
// only after the chain is done
chain.run("hello").catch(console.error);
return new NextResponse(stream.readable, {
headers: {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache",
},
});
}
| |
145141
|
import { ChatOpenAI } from "@langchain/openai";
import { createOpenAIToolsAgent, AgentExecutor } from "langchain/agents";
import { pull } from "langchain/hub";
import type { ChatPromptTemplate } from "@langchain/core/prompts";
const model = new ChatOpenAI({
openAIApiKey: "sk-XXXX",
});
const prompt = await pull<ChatPromptTemplate>(
"hwchase17/openai-functions-agent"
);
const agent = await createOpenAIToolsAgent({
llm: model,
prompt,
tools: []
});
const agentExecutor = new AgentExecutor({
agent,
tools: [],
});
console.log(agentExecutor);
| |
145209
|
# Conceptual guide
This section contains introductions to key parts of LangChain.
## Architecture
LangChain as a framework consists of several pieces. The below diagram shows how they relate.
import ThemedImage from "@theme/ThemedImage";
import useBaseUrl from "@docusaurus/useBaseUrl";
<ThemedImage
alt="Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers."
sources={{
light: useBaseUrl("/svg/langchain_stack_062024.svg"),
dark: useBaseUrl("/svg/langchain_stack_062024_dark.svg"),
}}
title="LangChain Framework Overview"
style={{ width: "100%" }}
/>
### `@langchain/core`
This package contains base abstractions of different components and ways to compose them together.
The interfaces for core components like LLMs, vector stores, retrievers and more are defined here.
No third party integrations are defined here.
The dependencies are kept purposefully very lightweight.
This package is a requirement of most others in the LangChain ecosystem, and must be installed separately.
### `@langchain/community`
This package contains third party integrations that are maintained by the LangChain community.
Key partner packages are separated out (see below).
This contains all integrations for various components (LLMs, vector stores, retrievers).
All dependencies in this package are optional to keep the package as lightweight as possible.
### Partner packages
While the long tail of integrations is in `@langchain/community`, we split popular integrations into their own packages (e.g. `@langchain/openai`, `@langchain/anthropic`, etc.).
This was done in order to improve support for these important integrations.
### `langchain`
The main `langchain` package contains chains, agents, and retrieval strategies that make up an application's cognitive architecture.
These are NOT third party integrations.
All chains, agents, and retrieval strategies here are NOT specific to any one integration, but rather generic across all integrations.
### [LangGraph.js](https://langchain-ai.github.io/langgraphjs/)
LangGraph.js is an extension of `langchain` aimed at
building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
LangGraph exposes high level interfaces for creating common types of agents, as well as a low-level API for composing custom flows.
### [LangSmith](https://docs.smith.langchain.com)
A developer platform that lets you debug, test, evaluate, and monitor LLM applications.
## Installation
If you want to work with high level abstractions, you should install the `langchain` package.
```bash npm2yarn
npm i langchain @langchain/core
```
If you want to work with specific integrations, you will need to install them separately.
See [here](/docs/integrations/platforms/) for a list of integrations and how to install them.
For working with LangSmith, you will need to set up a LangSmith developer account [here](https://smith.langchain.com) and get an API key.
After that, you can enable it by setting environment variables:
```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=ls__...
# Reduce tracing latency if you are not in a serverless environment
# export LANGCHAIN_CALLBACKS_BACKGROUND=true
```
## LangChain Expression Language
<span data-heading-keywords="lcel"></span>
LangChain Expression Language, or LCEL, is a declarative way to easily compose chains together.
LCEL was designed from day 1 to **support putting prototypes in production, with no code changes**, from the simplest “prompt + LLM” chain to the most complex chains
(we’ve seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL:
**First-class streaming support**
When you build your chains with LCEL you get the best possible time-to-first-token (time elapsed until the first chunk of output comes out). For some chains this means,
for example, that we stream tokens straight from an LLM to a streaming output parser, and you get back parsed, incremental chunks of output at the same rate as the LLM provider outputs the raw tokens.
**Optimized parallel execution**
Whenever your LCEL chains have steps that can be executed in parallel (e.g. if you fetch documents from multiple retrievers), we automatically do it for the smallest possible latency.
**Retries and fallbacks**
Configure retries and fallbacks for any part of your LCEL chain. This is a great way to make your chains more reliable at scale. We’re currently working on adding streaming
support for retries/fallbacks, so you can get the added reliability without any latency cost.
**Access intermediate results**
For more complex chains it’s often very useful to access the results of intermediate steps even before the final output is produced. This can be used to let end-users know
something is happening, or even just to debug your chain.
[**Seamless LangSmith tracing**](https://docs.smith.langchain.com)
As your chains get more and more complex, it becomes increasingly important to understand what exactly is happening at every step.
With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.smith.langchain.com) for maximum observability and debuggability.
### Runnable interface {#interface}
<span data-heading-keywords="invoke,runnable"></span>
To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html) protocol.
Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below.
This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way.
The standard interface includes:
- [`stream`](#stream): stream back chunks of the response
- [`invoke`](#invoke): call the chain on an input
- [`batch`](#batch): call the chain on an array of inputs
The **input type** and **output type** varies by component:
| Component | Input Type | Output Type |
| ------------ | ----------------------------------------------------- | --------------------- |
| Prompt | Object | PromptValue |
| ChatModel | Single string, list of chat messages or a PromptValue | ChatMessage |
| LLM | Single string, list of chat messages or a PromptValue | String |
| OutputParser | The output of an LLM or ChatModel | Depends on the parser |
| Retriever | Single string | List of Documents |
| Tool | Single string or object, depending on the tool | Depends on the tool |
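As a minimal sketch of this interface in action (using the `@langchain/openai` chat model integration; any runnable composes the same way):

```typescript
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { ChatOpenAI } from "@langchain/openai";

// Compose a prompt, a chat model, and an output parser into a single runnable.
const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a helpful assistant"],
  ["user", "Tell me a joke about {topic}"],
]);
const chain = prompt
  .pipe(new ChatOpenAI({ temperature: 0 }))
  .pipe(new StringOutputParser());

// invoke: call the chain on a single input
const joke = await chain.invoke({ topic: "cats" });

// stream: iterate over chunks of the output as they are produced
for await (const chunk of await chain.stream({ topic: "dogs" })) {
  console.log(chunk);
}

// batch: call the chain on an array of inputs
const jokes = await chain.batch([{ topic: "fish" }, { topic: "birds" }]);
```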
## Components
LangChain provides standard, extendable interfaces and external integrations for various components useful for building with LLMs.
Some components LangChain implements, some components we rely on third-party integrations for, and others are a mix.
### Chat models
<span data-heading-keywords="chat model,chat models"></span>
Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text).
These are generally newer models (older models are generally `LLMs`, see below).
Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages.
Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input.
This gives them the same interface as LLMs (and makes them simpler to use).
When a string is passed in as input, it will be converted to a `HumanMessage` under the hood before being passed to the underlying model.
LangChain does not host any Chat Models, rather we rely on third party integrations.
We have some standardized parameters when constructing ChatModels:
- `model`: the name of the model
Chat Models also accept other parameters that are specific to that integration.
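For example, constructing a chat model might look like this (a sketch using the `@langchain/openai` integration; the model name is purely illustrative):

```typescript
import { ChatOpenAI } from "@langchain/openai";

const chatModel = new ChatOpenAI({
  model: "gpt-4o-mini", // `model` is the standardized parameter; the name here is illustrative
  temperature: 0, // an example of an integration-specific parameter
});
```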
:::important
Some chat models have been fine-tuned for **tool calling** and provide a dedicated API for it.
Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling.
Please see the [tool calling section](/docs/concepts/#functiontool-calling) for more information.
:::
For specifics on how to use chat models, see the [relevant how-to guides here](/docs/how_to/#chat-models).
#### Multimodality
Some chat models are multimodal, accepting images, audio and even video as inputs.
These are still less common, meaning model providers haven't standardized on the "best" way to define the API.
Multimodal outputs are even less common. As such, we've kept our multimodal abstractions fairly lightweight
and plan to further solidify the multimodal APIs and interaction patterns as the field matures.
In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format.
So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations.
For specifics on how to use multimodal models, see the [relevant how-to guides here](/docs/how_to/#multimodal).
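As a hedged sketch (the image URL is a placeholder, and the model you pass this to must support image input), a multimodal message using content blocks might look like:

```typescript
import { HumanMessage } from "@langchain/core/messages";

// Combine text and an image in one message using OpenAI-style content blocks.
const message = new HumanMessage({
  content: [
    { type: "text", text: "What is shown in this image?" },
    { type: "image_url", image_url: { url: "https://example.com/some-image.png" } },
  ],
});
// await someMultimodalChatModel.invoke([message]);
```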
### LLMs
<span data-heading-keywords="llm,llms"></span>
| |
145210
|
:::caution
Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/docs/concepts/#chat-models),
even for non-chat use cases.
You are probably looking for [the section above instead](/docs/concepts/#chat-models).
:::
Language models that take a string as input and return a string.
These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see above).
Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input.
This gives them the same interface as [Chat Models](/docs/concepts/#chat-models).
When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model.
LangChain does not host any LLMs, rather we rely on third party integrations.
For specifics on how to use LLMs, see the [relevant how-to guides here](/docs/how_to/#llms).
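A minimal usage sketch (the model name mirrors the one used in the tests elsewhere in this repo):

```typescript
import { OpenAI } from "@langchain/openai";

// A plain text-in/text-out LLM.
const llm = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
const completion = await llm.invoke("Write a haiku about the ocean.");
console.log(completion); // a plain string
```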
### Message types
Some language models take an array of messages as input and return a message.
There are a few different types of messages.
All messages have a `role`, `content`, and `response_metadata` property.
The `role` describes WHO is saying the message. The standard roles are "user", "assistant", "system", and "tool".
LangChain has different message classes for different roles.
The `content` property describes the content of the message.
This can be a few different things:
- A string (most models deal with this type of content)
- A List of objects (this is used for multi-modal input, where the object contains information about that input type and that input location)
Optionally, messages can have a `name` property which allows for differentiating between multiple speakers with the same role.
For example, if there are two users in the chat history it can be useful to differentiate between them. Not all models support this.
#### HumanMessage
This represents a message with role "user".
#### AIMessage
This represents a message with role "assistant". In addition to the `content` property, these messages also have:
**`response_metadata`**
The `response_metadata` property contains additional metadata about the response. The data here is often specific to each model provider.
This is where information like log-probs and token usage may be stored.
**`tool_calls`**
These represent a decision from a language model to call a tool. They are included as part of an `AIMessage` output.
They can be accessed from there with the `.tool_calls` property.
This property returns a list of `ToolCall`s. A `ToolCall` is an object with the following arguments:
- `name`: The name of the tool that should be called.
- `args`: The arguments to that tool.
- `id`: The id of that tool call.
#### SystemMessage
This represents a message with role "system", which tells the model how to behave. Not every model provider supports this.
#### ToolMessage
This represents a message with role "tool", which contains the result of calling a tool. In addition to `role` and `content`, this message has:
- a `tool_call_id` field which conveys the id of the call to the tool that was called to produce this result.
- an `artifact` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.
#### (Legacy) FunctionMessage
This is a legacy message type, corresponding to OpenAI's legacy function-calling API. `ToolMessage` should be used instead to correspond to the updated tool-calling API.
This represents the result of a function call. In addition to `role` and `content`, this message has a `name` parameter which conveys the name of the function that was called to produce this result.
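A small sketch tying these message types together (the tool name and call id below are hypothetical):

```typescript
import {
  HumanMessage,
  SystemMessage,
  ToolMessage,
} from "@langchain/core/messages";

const messages = [
  new SystemMessage("You are a helpful assistant."),
  new HumanMessage("What is the weather in Paris?"),
];

// A tool-calling model might respond with an AIMessage whose `tool_calls` look like:
// [{ name: "get_weather", args: { city: "Paris" }, id: "call_123" }]
// The tool's result is then sent back as a ToolMessage referencing that id:
const toolResult = new ToolMessage({
  content: "18°C and sunny",
  tool_call_id: "call_123", // hypothetical id matching the tool call above
});
```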
### Prompt templates
<span data-heading-keywords="prompt,prompttemplate,chatprompttemplate"></span>
Prompt templates help to translate user input and parameters into instructions for a language model.
This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output.
Prompt Templates take as input an object, where each key represents a variable in the prompt template to fill in.
Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or an array of messages.
The reason this PromptValue exists is to make it easy to switch between strings and messages.
There are a few different types of prompt templates:
#### String PromptTemplates
These prompt templates are used to format a single string, and generally are used for simpler inputs.
For example, a common way to construct and use a PromptTemplate is as follows:
```typescript
import { PromptTemplate } from "@langchain/core/prompts";
const promptTemplate = PromptTemplate.fromTemplate(
"Tell me a joke about {topic}"
);
await promptTemplate.invoke({ topic: "cats" });
```
#### ChatPromptTemplates
These prompt templates are used to format an array of messages. These "templates" consist of an array of templates themselves.
For example, a common way to construct and use a ChatPromptTemplate is as follows:
```typescript
import { ChatPromptTemplate } from "@langchain/core/prompts";
const promptTemplate = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant"],
["user", "Tell me a joke about {topic}"],
]);
await promptTemplate.invoke({ topic: "cats" });
```
In the above example, this ChatPromptTemplate will construct two messages when called.
The first is a system message, that has no variables to format.
The second is a HumanMessage, and will be formatted by the `topic` variable the user passes in.
#### MessagesPlaceholder
<span data-heading-keywords="messagesplaceholder"></span>
This prompt template is responsible for adding an array of messages in a particular place.
In the above ChatPromptTemplate, we saw how we could format two messages, each one a string.
But what if we wanted the user to pass in an array of messages that we would slot into a particular spot?
This is how you use MessagesPlaceholder.
```typescript
import {
ChatPromptTemplate,
MessagesPlaceholder,
} from "@langchain/core/prompts";
import { HumanMessage } from "@langchain/core/messages";
const promptTemplate = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant"],
new MessagesPlaceholder("msgs"),
]);
promptTemplate.invoke({ msgs: [new HumanMessage({ content: "hi!" })] });
```
This will produce an array of two messages, the first one being a system message, and the second one being the HumanMessage we passed in.
If we had passed in 5 messages, then it would have produced 6 messages in total (the system message plus the 5 passed in).
This is useful for letting an array of messages be slotted into a particular spot.
An alternative way to accomplish the same thing without using the `MessagesPlaceholder` class explicitly is:
```typescript
const promptTemplate = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant"],
["placeholder", "{msgs}"], // <-- This is the changed part
]);
```
For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates).
### Example Selectors
One common prompting technique for achieving better performance is to include examples as part of the prompt.
This is known as [few-shot prompting](/docs/concepts/#few-shot-prompting).
This gives the language model concrete examples of how it should behave.
Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them.
Example Selectors are classes responsible for selecting and then formatting examples into prompts.
For specifics on how to use example selectors, see the [relevant how-to guides here](/docs/how_to/#example-selectors).
### Output parsers
<span data-heading-keywords="output parser"></span>
:::note
The information here refers to parsers that take the text output from a model and try to parse it into a more structured representation.
More and more models are supporting function (or tool) calling, which handles this automatically.
It is recommended to use function/tool calling rather than output parsing.
See documentation for that [here](/docs/concepts/#function-tool-calling).
:::
Responsible for taking the output of a model and transforming it to a more suitable format for downstream tasks.
Useful when you are using LLMs to generate structured data, or to normalize output from chat models and LLMs.
There are two main methods an output parser must implement:
- "Get format instructions": A method which returns a string containing instructions for how the output of a language model should be formatted.
- "Parse": A method which takes in a string (assumed to be the response from a language model) and parses it into some structure.
And then one optional one:
- "Parse with prompt": A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to be the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so.
Output parsers accept a string or `BaseMessage` as input and can return an arbitrary type.
LangChain has many different types of output parsers. This is a list of output parsers LangChain supports. The table below has various pieces of information:
**Name**: The name of the output parser
| |
145211
|
**Supports Streaming**: Whether the output parser supports streaming.
**Input Type**: Expected input type. Most output parsers work on both strings and messages, but some (like OpenAI Functions) need a message with specific arguments.
**Output Type**: The output type of the object returned by the parser.
**Description**: Our commentary on this output parser and when to use it.
| Name | Supports Streaming | Input Type | Output Type | Description |
| ------------------------------------------------------------------------------------------------------------- | ------------------ | ------------------------- | --------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [JSON](https://api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Promise<T>` | Returns a JSON object as specified. You can specify a Zod schema and it will return JSON for that model. |
| [XML](https://api.js.langchain.com/classes/langchain_core.output_parsers.XMLOutputParser.html)                 | ✅                 | `string` \| `BaseMessage` | `Promise<XMLResult>`              | Returns an object of tags. Use when XML output is needed. Use with models that are good at writing XML (like Anthropic's).                                 |
| [CSV](https://api.js.langchain.com/classes/langchain_core.output_parsers.CommaSeparatedListOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Array[string]` | Returns an array of comma separated values. |
| [Structured](https://api.js.langchain.com/classes/langchain_core.output_parsers.StructuredOutputParser.html) | | `string` \| `BaseMessage` | `Promise<TypeOf<T>>` | Parse structured JSON from an LLM response. |
| [HTTP](https://api.js.langchain.com/classes/langchain.output_parsers.HttpResponseOutputParser.html) | ✅ | `string` | `Promise<Uint8Array>` | Parse an LLM response to then send over HTTP(s). Useful when invoking the LLM on the server/edge, and then sending the content/stream back to the client. |
| [Bytes](https://api.js.langchain.com/classes/langchain_core.output_parsers.BytesOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Promise<Uint8Array>` | Parse an LLM response to then send over HTTP(s). Useful for streaming LLM responses from the server/edge to the client. |
| [Datetime](https://api.js.langchain.com/classes/langchain.output_parsers.DatetimeOutputParser.html) | | `string` | `Promise<Date>` | Parses response into a `Date`. |
| [Regex](https://api.js.langchain.com/classes/langchain.output_parsers.RegexParser.html)                        |                    | `string`                  | `Promise<Record<string, string>>` | Parses the given text using the regex pattern and returns an object with the parsed output.                                                                |
For specifics on how to use output parsers, see the [relevant how-to guides here](/docs/how_to/#output-parsers).
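As a small sketch using one of the built-in parsers (the comma-separated list parser from `@langchain/core/output_parsers`):

```typescript
import { CommaSeparatedListOutputParser } from "@langchain/core/output_parsers";

const parser = new CommaSeparatedListOutputParser();

// "Get format instructions": text you can include in a prompt so the model
// knows how to format its output.
const instructions = parser.getFormatInstructions();

// "Parse": turn the raw model output into a structured value.
const items = await parser.parse("red, green, blue");
// => ["red", "green", "blue"]
```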
### Chat History
Most LLM applications have a conversational interface.
An essential component of a conversation is being able to refer to information introduced earlier in the conversation.
At bare minimum, a conversational system should be able to access some window of past messages directly.
The concept of `ChatHistory` refers to a class in LangChain which can be used to wrap an arbitrary chain.
This `ChatHistory` will keep track of inputs and outputs of the underlying chain, and append them as messages to a message database.
Future interactions will then load those messages and pass them into the chain as part of the input.
### Document
<span data-heading-keywords="document,documents"></span>
A Document object in LangChain contains information about some data. It has two attributes:
- `pageContent: string`: The content of this document. Currently it is only a string.
- `metadata: Record<string, any>`: Arbitrary metadata associated with this document. Can track the document id, file name, etc.
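For example:

```typescript
import { Document } from "@langchain/core/documents";

const doc = new Document({
  pageContent: "Mitochondria are the powerhouse of the cell",
  metadata: { source: "biology-notes.txt", id: 1 }, // illustrative metadata
});
```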
### Document loaders
<span data-heading-keywords="document loader,document loaders"></span>
These classes load Document objects. LangChain has hundreds of integrations with various data sources to load data from: Slack, Notion, Google Drive, etc.
Each DocumentLoader has its own specific parameters, but they can all be invoked in the same way with the `.load` method.
An example use case is as follows:
```typescript
import { CSVLoader } from "@langchain/community/document_loaders/fs/csv";
// The constructor takes integration-specific parameters, e.g. a file path (placeholder below)
const loader = new CSVLoader("path/to/example.csv");
const docs = await loader.load();
```
For specifics on how to use document loaders, see the [relevant how-to guides here](/docs/how_to/#document-loaders).
### Text splitters
Once you've loaded documents, you'll often want to transform them to better suit your application. The simplest example is you may want to split a long document into smaller chunks that can fit into your model's context window. LangChain has a number of built-in document transformers that make it easy to split, combine, filter, and otherwise manipulate documents.
When you want to deal with long pieces of text, it is necessary to split up that text into chunks. As simple as this sounds, there is a lot of potential complexity here. Ideally, you want to keep the semantically related pieces of text together. What "semantically related" means could depend on the type of text. This notebook showcases several ways to do that.
At a high level, text splitters work as follows:
1. Split the text up into small, semantically meaningful chunks (often sentences).
2. Start combining these small chunks into a larger chunk until you reach a certain size (as measured by some function).
3. Once you reach that size, make that chunk its own piece of text and then start creating a new chunk of text with some overlap (to keep context between chunks).
That means there are two different axes along which you can customize your text splitter:
1. How the text is split
2. How the chunk size is measured
For specifics on how to use text splitters, see the [relevant how-to guides here](/docs/how_to/#text-splitters).
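A minimal sketch (assuming the `@langchain/textsplitters` package and the `docs` loaded in the document loader example above; the chunk sizes are illustrative):

```typescript
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";

const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 500, // how chunk size is measured here: number of characters
  chunkOverlap: 50, // overlap between consecutive chunks to preserve context
});
const chunks = await splitter.splitDocuments(docs);
```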
### Embedding models
<span data-heading-keywords="embedding,embeddings"></span>
Embedding models create a vector representation of a piece of text. You can think of a vector as an array of numbers that captures the semantic meaning of the text.
By representing the text in this way, you can perform mathematical operations that allow you to do things like search for other pieces of text that are most similar in meaning.
These natural language search capabilities underpin many types of [context retrieval](/docs/concepts/#retrieval),
where we provide an LLM with the relevant data it needs to effectively respond to a query.

The `Embeddings` class is a class designed for interfacing with text embedding models. There are many different embedding model providers (OpenAI, Cohere, Hugging Face, etc) and local models, and this class is designed to provide a standard interface for all of them.
The base Embeddings class in LangChain provides two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).
For specifics on how to use embedding models, see the [relevant how-to guides here](/docs/how_to/#embedding-models).
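A minimal sketch using the OpenAI embeddings integration:

```typescript
import { OpenAIEmbeddings } from "@langchain/openai";

const embeddings = new OpenAIEmbeddings();

// Embed a single query...
const queryVector = await embeddings.embedQuery("What is the powerhouse of the cell?");

// ...or embed multiple documents at once.
const docVectors = await embeddings.embedDocuments([
  "Mitochondria are the powerhouse of the cell",
  "Buildings are made out of brick",
]);
```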
### Vector stores {#vectorstore}
<span data-heading-keywords="vector,vectorstore,vectorstores,vector store,vector stores"></span>
One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding vectors,
and then at query time to embed the unstructured query and retrieve the embedding vectors that are 'most similar' to the embedded query.
A vector store takes care of storing embedded data and performing vector search for you.
Most vector stores can also store metadata about embedded vectors and support filtering on that metadata before
similarity search, allowing you more control over returned documents.
Vectorstores can be converted to the retriever interface by doing:
```typescript
const vectorstore = new MyVectorStore();
const retriever = vectorstore.asRetriever();
```
For specifics on how to use vector stores, see the [relevant how-to guides here](/docs/how_to/#vectorstores).
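As a small end-to-end sketch using the in-memory vector store (mirroring the setup used in the tests elsewhere in this repo):

```typescript
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "@langchain/openai";

// Embed and store a few texts, then search for the most similar ones.
const vectorStore = await MemoryVectorStore.fromTexts(
  ["Mitochondria are the powerhouse of the cell", "Buildings are made out of brick"],
  [{ id: 1 }, { id: 2 }],
  new OpenAIEmbeddings()
);
const results = await vectorStore.similaritySearch("What powers the cell?", 1);
```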
### Retrievers
<span data-heading-keywords="retriever,retrievers"></span>
A retriever is an interface that returns relevant documents given an unstructured query.
They are more general than a vector store.
A retriever does not need to be able to store documents, only to return (or retrieve) them.
Retrievers can be created from vector stores, but are also broad enough to include [Exa search](/docs/integrations/retrievers/exa/) (web search) and [Amazon Kendra](/docs/integrations/retrievers/kendra-retriever/).
Retrievers accept a string query as input and return an array of `Document`s as output.
For specifics on how to use retrievers, see the [relevant how-to guides here](/docs/how_to/#retrievers).
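Continuing the vector store sketch above, every retriever exposes the same string-in, documents-out interface:

```typescript
const retriever = vectorStore.asRetriever();
const relevantDocs = await retriever.invoke("What powers the cell?");
// => an array of Documents with `pageContent` and `metadata`
```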
### Key-value stores
For some techniques, such as [indexing and retrieval with multiple vectors per document](/docs/how_to/multi_vector/), having some sort of key-value (KV) storage is helpful.
| |
145212
|
LangChain includes a [`BaseStore`](https://api.js.langchain.com/classes/langchain_core.stores.BaseStore.html) interface,
which allows for storage of arbitrary data. However, LangChain components that require KV-storage accept a
more specific `BaseStore<string, Uint8Array>` instance that stores binary data (referred to as a `ByteStore`), and internally take care of
encoding and decoding data for their specific needs.
This means that as a user, you only need to think about one type of store rather than different ones for different types of data.
#### Interface
All [`BaseStores`](https://api.js.langchain.com/classes/langchain_core.stores.BaseStore.html) support the following interface. Note that the interface allows
for modifying **multiple** key-value pairs at once:
- `mget(keys: string[]): Promise<(undefined | Uint8Array)[]>`: get the contents of multiple keys, returning `undefined` if a key does not exist
- `mset(keyValuePairs: [string, Uint8Array][]): Promise<void>`: set the contents of multiple keys
- `mdelete(keys: string[]): Promise<void>`: delete multiple keys
- `yieldKeys(prefix?: string): AsyncGenerator<string>`: yield all keys in the store, optionally filtering by a prefix
For key-value store implementations, see [this section](/docs/integrations/stores/).
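A minimal sketch using the in-memory implementation from `@langchain/core/stores` (string values are used here for readability; a `ByteStore` would hold `Uint8Array` values instead):

```typescript
import { InMemoryStore } from "@langchain/core/stores";

const store = new InMemoryStore<string>();

await store.mset([
  ["doc:1", "first value"],
  ["doc:2", "second value"],
]);
const [first, missing] = await store.mget(["doc:1", "doc:3"]); // "first value", undefined
await store.mdelete(["doc:2"]);

for await (const key of store.yieldKeys("doc:")) {
  console.log(key); // "doc:1"
}
```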
### Tools
<span data-heading-keywords="tool,tools"></span>
Tools are utilities designed to be called by a model: their inputs are designed to be generated by models, and their outputs are designed to be passed back to models.
Tools are needed whenever you want a model to control parts of your code or call out to external APIs.
A tool consists of:
1. The name of the tool.
2. A description of what the tool does.
3. A JSON schema defining the inputs to the tool.
4. A function.
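For example, here's a sketch of defining a simple tool with a [Zod](https://zod.dev) schema, assuming your version of `@langchain/core` includes the `tool()` helper (the weather lookup itself is a stand-in):
```ts
import { tool } from "@langchain/core/tools";
import { z } from "zod";

// A hypothetical weather tool: the name, description, and schema are what the
// model sees; the function is what actually runs when the tool is invoked.
const getWeather = tool(
  async ({ city }) => {
    // Replace this stub with a real API call.
    return `It is currently sunny in ${city}.`;
  },
  {
    name: "get_weather",
    description: "Get the current weather for a given city.",
    schema: z.object({
      city: z.string().describe("The city to get the weather for"),
    }),
  }
);
```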
When a tool is bound to a model, the name, description and JSON schema are provided as context to the model.
Given a list of tools and a set of instructions, a model can request to call one or more tools with specific inputs.
Typical usage may look like the following:
```ts
// Define a list of tools
const tools = [...];
const llmWithTools = llm.bindTools(tools);
const aiMessage = await llmWithTools.invoke("do xyz...");
// AIMessage(tool_calls=[ToolCall(...), ...], ...)
```
The `AIMessage` returned from the model MAY have `tool_calls` associated with it.
Read [this guide](/docs/concepts/#aimessage) for more information on what the response type may look like.
Once the tools are chosen, you will usually want to invoke them and then pass the results back to the model so that it can complete whatever task
it's performing.
There are generally two different ways to invoke the tool and pass back the response:
#### Invoke with just the arguments
When you invoke a tool with just the arguments, you will get back the raw tool output (usually a string).
Here's what this looks like:
```ts
import { ToolMessage } from "@langchain/core/messages";
const toolCall = aiMessage.tool_calls[0]; // ToolCall(args={...}, id=..., ...)
const toolOutput = await tool.invoke(toolCall.args);
const toolMessage = new ToolMessage({
content: toolOutput,
name: toolCall.name,
tool_call_id: toolCall.id,
});
```
Note that the `content` field will generally be passed back to the model.
If you do not want the raw tool response to be passed to the model, but you still want to keep it around,
you can transform the tool output for the `content` field and also pass the raw output as an artifact (read more about [`ToolMessage.artifact` here](/docs/concepts/#toolmessage)):
```ts
// Same code as above
const responseForModel = someTransformation(toolOutput);
const toolMessage = new ToolMessage({
content: responseForModel,
tool_call_id: toolCall.id,
name: toolCall.name,
  artifact: toolOutput,
});
```
#### Invoke with `ToolCall`
The other way to invoke a tool is to call it with the full `ToolCall` that was generated by the model.
When you do this, the tool will return a `ToolMessage`.
The benefits of this are that you don't have to write the logic yourself to transform the tool output into a ToolMessage.
Here's what this looks like:
```ts
const toolCall = aiMessage.tool_calls[0];
const toolMessage = await tool.invoke(toolCall);
```
If you are invoking the tool this way and want to include an [artifact](/docs/concepts/#toolmessage) for the `ToolMessage`, you will need to have the tool return a tuple
with two items: the `content` and the `artifact`.
Read more about [defining tools that return artifacts here](/docs/how_to/tool_artifacts/).
#### Best practices
When designing tools to be used by a model, it is important to keep in mind that:
- Chat models that have explicit [tool-calling APIs](/docs/concepts/#functiontool-calling) will be better at tool calling than non-fine-tuned models.
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas. This is another form of prompt engineering.
- Simple, narrowly scoped tools are easier for models to use than complex tools.
#### Related
For specifics on how to use tools, see the [tools how-to guides](/docs/how_to/#tools).
To use a pre-built tool, see the [tool integration docs](/docs/integrations/tools/).
### Toolkits
<span data-heading-keywords="toolkit,toolkits"></span>
Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.
All Toolkits expose a `getTools` method which returns an array of tools.
You can therefore do:
```typescript
// Initialize a toolkit
const toolkit = new ExampleToolkit(...);
// Get list of tools
const tools = toolkit.getTools();
```
### Agents
By themselves, language models can't take actions - they just output text.
A big use case for LangChain is creating **agents**.
Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be.
The results of those actions can then be fed back into the agent, which determines whether more actions are needed or whether it is okay to finish.
[LangGraph](https://github.com/langchain-ai/langgraphjs) is an extension of LangChain specifically aimed at creating highly controllable and customizable agents.
Please check out that [documentation](https://langchain-ai.github.io/langgraphjs/) for a more in depth overview of agent concepts.
There is a legacy agent concept in LangChain that we are moving towards deprecating: `AgentExecutor`.
AgentExecutor was essentially a runtime for agents.
It was a great place to get started; however, it was not flexible enough as you started to build more customized agents.
In order to solve that we built LangGraph to be this flexible, highly-controllable runtime.
If you are still using AgentExecutor, do not fear: we still have a guide on [how to use AgentExecutor](/docs/how_to/agent_executor).
It is recommended, however, that you start to transition to [LangGraph](https://github.com/langchain-ai/langgraphjs).
In order to assist in this we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).
#### ReAct agents
<span data-heading-keywords="react,react agent"></span>
One popular architecture for building agents is [**ReAct**](https://arxiv.org/abs/2210.03629).
ReAct combines reasoning and acting in an iterative process - in fact the name "ReAct" stands for "Reason" and "Act".
The general flow looks like this:
- The model will "think" about what step to take in response to an input and any previous observations.
- The model will then choose an action from available tools (or choose to respond to the user).
- The model will generate arguments to that tool.
- The agent runtime (executor) will parse out the chosen tool and call it with the generated arguments.
- The executor will return the results of the tool call back to the model as an observation.
- This process repeats until the agent chooses to respond.
There are general prompting based implementations that do not require any model-specific features, but the most
reliable implementations use features like [tool calling](/docs/how_to/tool_calling/) to reliably format outputs
and reduce variance.
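As a rough sketch, LangGraph's prebuilt `createReactAgent` wires this loop up for you (assuming `@langchain/langgraph` is installed and `tools` is an array of LangChain tools you have defined):
```ts
import { createReactAgent } from "@langchain/langgraph/prebuilt";
import { ChatAnthropic } from "@langchain/anthropic";
import { HumanMessage } from "@langchain/core/messages";

const llm = new ChatAnthropic({ model: "claude-3-sonnet-20240229" });

// `tools` is assumed to be an array of LangChain tools defined elsewhere.
const agent = createReactAgent({ llm, tools });

// The agent loops between the model and the tools until the model responds directly.
const result = await agent.invoke({
  messages: [new HumanMessage("What is the weather in San Francisco?")],
});
console.log(result.messages.at(-1)?.content);
```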
Please see the [LangGraph documentation](https://langchain-ai.github.io/langgraph/) for more information,
or [this how-to guide](/docs/how_to/migrate_agent/) for specific information on migrating to LangGraph.
### Callbacks
LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.
You can subscribe to these events by using the `callbacks` argument available throughout the API. This argument is a list of handler objects, which are expected to implement one or more of the methods described below in more detail.
#### Callback Events
| Event | Event Trigger | Associated Method |
| ---------------- | ------------------------------------------- | ---------------------- |
| Chat model start | When a chat model starts | `handleChatModelStart` |
| LLM start        | When an LLM starts                          | `handleLLMStart`       |
| LLM new token    | When an LLM or chat model emits a new token | `handleLLMNewToken`    |
| LLM end          | When an LLM or chat model ends              | `handleLLMEnd`         |
| LLM error        | When an LLM or chat model errors            | `handleLLMError`       |
| Chain start | When a chain starts running | `handleChainStart` |
| Chain end | When a chain ends | `handleChainEnd` |
| Chain error | When a chain errors | `handleChainError` |
| Tool start | When a tool starts running | `handleToolStart` |
| Tool end | When a tool ends | `handleToolEnd` |
| Tool error | When a tool errors | `handleToolError` |
| Agent action | When an agent takes an action | `handleAgentAction` |
| Agent finish | When an agent ends | `handleAgentEnd` |
| Retriever start | When a retriever starts | `handleRetrieverStart` |
| Retriever end | When a retriever ends | `handleRetrieverEnd` |
| Retriever error | When a retriever errors | `handleRetrieverError` |
| Text | When arbitrary text is run | `handleText` |
#### Callback handlers
`CallbackHandlers` are objects that implement the [`CallbackHandler`](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html) interface, which has a method for each event that can be subscribed to.
The `CallbackManager` will call the appropriate method on each handler when the event is triggered.
#### Passing callbacks
The `callbacks` property is available on most objects throughout the API (Models, Tools, Agents, etc.) in two different places:
- **Request callbacks**: Passed at the time of the request in addition to the input data.
Available on all standard `Runnable` objects. These callbacks are INHERITED by all children
of the object they are defined on. For example, `chain.invoke({foo: "bar"}, {callbacks: [handler]})`.
- **Constructor callbacks**: defined in the constructor, e.g. `new ChatAnthropic({ callbacks: [handler], tags: ["a-tag"] })`. In this case, the callbacks will be used for all calls made on that object, and will be scoped to that object only.
For example, if you initialize a chat model with constructor callbacks, then use it within a chain, the callbacks will only be invoked for calls to that model.
:::warning
Constructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children
of the object.
:::
If you're creating a custom chain or runnable, you need to remember to propagate request time
callbacks to any child objects.
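For example, here's a minimal sketch of passing a request-time handler that logs when any LLM call inside a chain finishes (assuming `chain` is any runnable):
```ts
// Request callbacks are passed alongside the input and are inherited by all child runs.
await chain.invoke(
  { topic: "parrots" },
  {
    callbacks: [
      {
        handleLLMEnd(output) {
          console.log("LLM call finished:", output);
        },
      },
    ],
  }
);
```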
For specifics on how to use callbacks, see the [relevant how-to guides here](/docs/how_to/#callbacks).
## Techniques
### Streaming
<span data-heading-keywords="stream,streaming"></span>
Individual LLM calls often run for much longer than traditional resource requests.
This compounds when you build more complex chains or agents that require multiple reasoning steps.
Fortunately, LLMs generate output iteratively, which means it's possible to show sensible intermediate results
before the final response is ready. Consuming output as soon as it becomes available has therefore become a vital part of the UX
around building apps with LLMs to help alleviate latency issues, and LangChain aims to have first-class support for streaming.
Below, we'll discuss some concepts and considerations around streaming in LangChain.
#### `.stream()`
Most modules in LangChain include the `.stream()` method as an ergonomic streaming interface.
`.stream()` returns an async iterator, which you can consume with a [`for await...of`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for-await...of) loop. Here's an example with a chat model:
```ts
import { ChatAnthropic } from "@langchain/anthropic";
import { concat } from "@langchain/core/utils/stream";
import type { AIMessageChunk } from "@langchain/core/messages";
const model = new ChatAnthropic({ model: "claude-3-sonnet-20240229" });
const stream = await model.stream("what color is the sky?");
let gathered: AIMessageChunk | undefined = undefined;
for await (const chunk of stream) {
console.log(chunk);
if (gathered === undefined) {
gathered = chunk;
} else {
gathered = concat(gathered, chunk);
}
}
console.log(gathered);
```
For models (or other components) that don't support streaming natively, this iterator would just yield a single chunk, but
you could still use the same general pattern when calling them. Using `.stream()` will also automatically call the model in streaming mode
without the need to provide additional config.
The type of each outputted chunk depends on the type of component - for example, chat models yield [`AIMessageChunks`](https://api.js.langchain.com/classes/langchain_core.messages.AIMessageChunk.html).
Because this method is part of [LangChain Expression Language](/docs/concepts/#langchain-expression-language),
you can handle formatting differences from different outputs using an [output parser](/docs/concepts/#output-parsers) to transform
each yielded chunk.
You can check out [this guide](/docs/how_to/streaming/#using-stream) for more detail on how to use `.stream()`.
#### `.streamEvents()`
<span data-heading-keywords="streamEvents,stream events"></span>
While the `.stream()` method is intuitive, it can only stream the final output of your chain. This is fine for single LLM calls,
but as you build more complex chains of several LLM calls together, you may want to use the intermediate values of
the chain alongside the final output - for example, returning sources alongside the final generation when building a chat
over documents app.
There are ways to do this [using callbacks](/docs/concepts/#callbacks-1), or by constructing your chain in such a way that it passes intermediate
values to the end with something like chained [`.assign()`](/docs/how_to/passthrough/) calls, but LangChain also includes an
`.streamEvents()` method that combines the flexibility of callbacks with the ergonomics of `.stream()`. When called, it returns an iterator
which yields [various types of events](/docs/how_to/streaming/#event-reference) that you can filter and process according
to the needs of your project.
Here's one small example that prints just events containing streamed chat model output:
```ts
import { StringOutputParser } from "@langchain/core/output_parsers";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatAnthropic } from "@langchain/anthropic";
const model = new ChatAnthropic({ model: "claude-3-sonnet-20240229" });
const prompt = ChatPromptTemplate.fromTemplate("tell me a joke about {topic}");
const parser = new StringOutputParser();
const chain = prompt.pipe(model).pipe(parser);
const eventStream = await chain.streamEvents(
{ topic: "parrot" },
{ version: "v2" }
);
for await (const event of eventStream) {
const kind = event.event;
if (kind === "on_chat_model_stream") {
console.log(event);
}
}
```
You can roughly think of it as an iterator over callback events (though the format differs) - and you can use it on almost all LangChain components!
See [this guide](/docs/how_to/streaming/#using-stream-events) for more detailed information on how to use `.streamEvents()`,
or [this guide](/docs/how_to/callbacks_custom_events) for how to stream custom events from within a chain.
#### Callbacks
The lowest level way to stream outputs from LLMs in LangChain is via the [callbacks](/docs/concepts/#callbacks) system. You can pass a
callback handler that handles the [`handleLLMNewToken`](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html#handleLLMNewToken) event into LangChain components. When that component is invoked, any
[LLM](/docs/concepts/#llms) or [chat model](/docs/concepts/#chat-models) contained in the component calls
the callback with the generated token. Within the callback, you could pipe the tokens into some other destination, e.g. an HTTP response.
You can also handle the [`handleLLMEnd`](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html#handleLLMEnd) event to perform any necessary cleanup.
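Here's a minimal sketch of that pattern, piping each generated token to standard output (depending on the provider, you may also need to opt into streaming explicitly, e.g. via a `streaming: true` constructor flag as shown here):
```ts
import { ChatOpenAI } from "@langchain/openai";

const model = new ChatOpenAI({ model: "gpt-4o", streaming: true });

await model.invoke("Why do parrots talk?", {
  callbacks: [
    {
      // Fired for each new token as the model generates it.
      handleLLMNewToken(token) {
        process.stdout.write(token);
      },
    },
  ],
});
```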
You can see [this how-to section](/docs/how_to/#callbacks) for more specifics on using callbacks.
Callbacks were the first technique for streaming introduced in LangChain. While powerful and generalizable,
they can be unwieldy for developers. For example:
- You need to explicitly initialize and manage some aggregator or other stream to collect results.
- The execution order isn't explicitly guaranteed, and you could theoretically have a callback run after the `.invoke()` method finishes.
- Providers would often make you pass an additional parameter to stream outputs instead of returning them all at once.
- You would often ignore the result of the actual model call in favor of callback results.
#### Tokens
The unit that most model providers use to measure input and output is called a **token**.
Tokens are the basic units that language models read and generate when processing or producing text.
The exact definition of a token can vary depending on the specific way the model was trained -
for instance, in English, a token could be a single word like "apple", or a part of a word like "app".
When you send a model a prompt, the words and characters in the prompt are encoded into tokens using a **tokenizer**.
The model then streams back generated output tokens, which the tokenizer decodes into human-readable text.
The below example shows how OpenAI models tokenize `LangChain is cool!`:

You can see that it gets split into 5 different tokens, and that the boundaries between tokens are not exactly the same as word boundaries.
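If you want to inspect or count tokens yourself, you can run a tokenizer directly. Here's a small sketch assuming the [`js-tiktoken`](https://www.npmjs.com/package/js-tiktoken) package, which implements OpenAI's tokenizers (the exact token count and boundaries depend on the model's encoding):
```ts
import { encodingForModel } from "js-tiktoken";

// Load the tokenizer used by a given OpenAI model.
const enc = encodingForModel("gpt-4");

const tokens = enc.encode("LangChain is cool!");
console.log(tokens.length); // Number of tokens in the input
console.log(enc.decode(tokens)); // "LangChain is cool!"
```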
The reason language models use tokens rather than something more immediately intuitive like "characters"
has to do with how they process and understand text. At a high-level, language models iteratively predict their next generated output based on
the initial input and their previous generations. Training on tokens allows language models to handle linguistic
units (like words or subwords) that carry meaning, rather than individual characters, which makes it easier for the model
to learn and understand the structure of the language, including grammar and context.
Furthermore, using tokens can also improve efficiency, since the model processes fewer units of text compared to character-level processing.
### Function/tool calling
:::info
We use the term tool calling interchangeably with function calling. Although
function calling is sometimes meant to refer to invocations of a single function,
we treat all models as though they can return multiple tool or function calls in
each message.
:::
Tool calling allows a [chat model](/docs/concepts/#chat-models) to respond to a given prompt by generating output that
matches a user-defined schema.
While the name implies that the model is performing
some action, this is actually not the case! The model only generates the arguments to a tool, and actually running the tool (or not) is up to the user.
One common example where you **wouldn't** want to call a function with the generated arguments
is if you want to [extract structured output matching some schema](/docs/concepts/#structured-output)
from unstructured text. You would give the model an "extraction" tool that takes
parameters matching the desired schema, then treat the generated output as your final
result.

Tool calling is not universal, but is supported by many popular LLM providers, including [Anthropic](/docs/integrations/chat/anthropic/),
[Cohere](/docs/integrations/chat/cohere/), [Google](/docs/integrations/chat/google_vertex_ai/),
[Mistral](/docs/integrations/chat/mistral/), [OpenAI](/docs/integrations/chat/openai/), and even for locally-running models via [Ollama](/docs/integrations/chat/ollama/).
LangChain provides a standardized interface for tool calling that is consistent across different models.
The standard interface consists of:
- `ChatModel.bindTools()`: a method for specifying which tools are available for a model to call. This method accepts [LangChain tools](/docs/concepts/#tools) as well as model-specific formats.
- `AIMessage.tool_calls`: an attribute on the `AIMessage` returned from the model for accessing the tool calls requested by the model.
#### Tool usage
After the model generates tool calls, you can use them by invoking the corresponding tools, then passing the results back to the model.
LangChain provides the [`Tool`](/docs/concepts/#tools) abstraction to help you handle this.
The general flow is this:
1. Generate tool calls with a chat model in response to a query.
2. Invoke the appropriate tools using the generated tool call as arguments.
3. Format the result of the tool invocations as [`ToolMessages`](/docs/concepts/#toolmessage).
4. Pass the entire list of messages back to the model so that it can generate a final answer (or call more tools).

This is how tool calling [agents](/docs/concepts/#agents) perform tasks and answer queries.
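Put together, a rough sketch of this loop might look like the following (`llmWithTools` is a model with tools bound as shown earlier, and `toolsByName` is an assumed map from tool names to tool instances):
```ts
import { HumanMessage } from "@langchain/core/messages";

const messages = [new HumanMessage("What is 3 * 12?")];

// 1. Generate tool calls with the chat model.
const aiMessage = await llmWithTools.invoke(messages);
messages.push(aiMessage);

// 2 & 3. Invoke each requested tool; invoking with the full ToolCall returns a ToolMessage.
for (const toolCall of aiMessage.tool_calls ?? []) {
  const selectedTool = toolsByName[toolCall.name];
  messages.push(await selectedTool.invoke(toolCall));
}

// 4. Pass the whole conversation back to the model for a final answer.
const finalResponse = await llmWithTools.invoke(messages);
console.log(finalResponse.content);
```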
Check out some more focused guides below:
- [How to use chat models to call tools](/docs/how_to/tool_calling/)
- [How to pass tool outputs to chat models](/docs/how_to/tool_results_pass_to_model/)
- [Building an agent with LangGraph](https://langchain-ai.github.io/langgraphjs/tutorials/introduction/)
### Structured output
LLMs are capable of generating arbitrary text. This enables the model to respond appropriately to a wide
range of inputs, but for some use-cases, it can be useful to constrain the LLM's output
to a specific format or structure. This is referred to as **structured output**.
For example, if the output is to be stored in a relational database,
it is much easier if the model generates output that adheres to a defined schema or format.
[Extracting specific information](/docs/tutorials/extraction/) from unstructured text is another
case where this is particularly useful. Most commonly, the output format will be JSON,
though other formats such as [XML](/docs/how_to/output_parser_xml/) can be useful too. Below, we'll discuss
a few ways to get structured output from models in LangChain.
#### `.withStructuredOutput()`
For convenience, some LangChain chat models support a [`.withStructuredOutput()`](/docs/how_to/structured_output/#the-.withstructuredoutput-method) method.
This method only requires a schema as input, and returns an object matching the requested schema.
Generally, this method is only present on models that support one of the more advanced methods described below,
and will use one of them under the hood. It takes care of importing a suitable output parser and
formatting the schema in the right format for the model.
Here's an example:
```ts
import { z } from "zod";
const joke = z.object({
setup: z.string().describe("The setup of the joke"),
punchline: z.string().describe("The punchline to the joke"),
rating: z.number().optional().describe("How funny the joke is, from 1 to 10"),
});
// Can also pass in JSON schema.
// It's also beneficial to pass in an additional "name" parameter to give the
// model more context around the type of output to generate.
const structuredLlm = model.withStructuredOutput(joke);
await structuredLlm.invoke("Tell me a joke about cats");
```
```
{
setup: "Why don't cats play poker in the wild?",
punchline: "Too many cheetahs.",
rating: 7
}
```
We recommend this method as a starting point when working with structured output:
- It uses other model-specific features under the hood, without the need to import an output parser.
- For the models that use tool calling, no special prompting is needed.
- If multiple underlying techniques are supported, you can supply a `method` parameter to
[toggle which one is used](/docs/how_to/structured_output/#specifying-the-output-method-advanced).
You may want or need to use other techniques if:
- The chat model you are using does not support tool calling.
- You are working with very complex schemas and the model is having trouble generating outputs that conform.
For more information, check out this [how-to guide](/docs/how_to/structured_output/#the-.withstructuredoutput-method).
You can also check out [this table](/docs/integrations/chat/) for a list of models that support
`.withStructuredOutput()`.
#### Raw prompting
The most intuitive way to get a model to structure its output is to ask nicely.
In addition to your query, you can give instructions describing what kind of output you'd like, then
parse the output using an [output parser](/docs/concepts/#output-parsers) to convert the raw
model message or string output into something more easily manipulated.
The biggest benefit to raw prompting is its flexibility:
- Raw prompting does not require any special model features, only sufficient reasoning capability to understand
the passed schema.
- You can prompt for any format you'd like, not just JSON. This can be useful if the model you
are using is more heavily trained on a certain type of data, such as XML or YAML.
However, there are some drawbacks too:
- LLMs are non-deterministic, and prompting an LLM to consistently output data in the exactly correct format
for smooth parsing can be surprisingly difficult and model-specific.
- Individual models have quirks depending on the data they were trained on, and optimizing prompts can be quite difficult.
Some may be better at interpreting [JSON schema](https://json-schema.org/), others may be best with TypeScript definitions,
and still others may prefer XML.
While features offered by model providers may increase reliability, prompting techniques remain important for tuning your
results no matter which method you choose.
#### JSON mode
<span data-heading-keywords="json mode"></span>
Some models, such as [Mistral](/docs/integrations/chat/mistral/), [OpenAI](/docs/integrations/chat/openai/),
[Together AI](/docs/integrations/chat/togetherai/) and [Ollama](/docs/integrations/chat/ollama/),
support a feature called **JSON mode**, usually enabled via config.
When enabled, JSON mode will constrain the model's output to always be some sort of valid JSON.
JSON mode often requires some custom prompting, but it's usually much less burdensome than completely raw prompting -
more along the lines of `"you must always return JSON"` - and the [output is generally easier to parse](/docs/how_to/output_parser_json/).
It's also generally simpler to use directly and more commonly available than tool calling, and can give
you more flexibility around prompting and shaping results.
Here's an example:
```ts
import { JsonOutputParser } from "@langchain/core/output_parsers";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
const model = new ChatOpenAI({
model: "gpt-4o",
modelKwargs: {
response_format: { type: "json_object" },
},
});
const TEMPLATE = `Answer the user's question to the best of your ability.
You must always output a JSON object with an "answer" key and a "followup_question" key.
{question}`;
const prompt = ChatPromptTemplate.fromTemplate(TEMPLATE);
const chain = prompt.pipe(model).pipe(new JsonOutputParser());
await chain.invoke({ question: "What is the powerhouse of the cell?" });
```
```
{
answer: "The powerhouse of the cell is the mitochondrion.",
followup_question: "Would you like to learn more about the functions of mitochondria?"
}
```
For a full list of model providers that support JSON mode, see [this table](/docs/integrations/chat/).
#### Tool calling {#structured-output-tool-calling}
For models that support it, [tool calling](/docs/concepts/#functiontool-calling) can be very convenient for structured output. It removes the
guesswork around how best to prompt schemas in favor of a built-in model feature.
It works by first binding the desired schema either directly or via a [LangChain tool](/docs/concepts/#tools) to a
[chat model](/docs/concepts/#chat-models) using the `.bindTools()` method. The model will then generate an `AIMessage` containing
a `tool_calls` field containing `args` that match the desired shape.
There are several acceptable formats you can use to bind tools to a model in LangChain. Here's one example using [Zod](https://zod.dev):
```ts
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { ChatOpenAI } from "@langchain/openai";
const toolSchema = z.object({
answer: z.string().describe("The answer to the user's question"),
followup_question: z
.string()
.describe("A followup question the user could ask"),
});
const model = new ChatOpenAI({
model: "gpt-4o",
temperature: 0,
});
const modelWithTools = model.bindTools([
{
type: "function",
function: {
name: "response_formatter",
description:
"Always use this tool to structure your response to the user.",
parameters: zodToJsonSchema(toolSchema),
},
},
]);
const aiMessage = await modelWithTools.invoke(
"What is the powerhouse of the cell?"
);
aiMessage.tool_calls?.[0].args;
```
```
{
answer: 'The powerhouse of the cell is the mitochondrion.',
followup_question: 'What is the main function of the mitochondrion in the cell?'
}
```
Tool calling is a generally consistent way to get a model to generate structured output, and is the default technique
used for the [`.withStructuredOutput()`](/docs/concepts/#withstructuredoutput) method when a model supports it.
The following how-to guides are good practical resources for using function/tool calling for structured output:
- [How to return structured data from an LLM](/docs/how_to/structured_output/)
- [How to use a model to call tools](/docs/how_to/tool_calling)
### Few-shot prompting
One of the most effective ways to improve model performance is to give a model examples of what you want it to do. The technique of adding example inputs and expected outputs to a model prompt is known as "few-shot prompting". There are a few things to think about when doing few-shot prompting:
1. How are examples generated?
2. How many examples are in each prompt?
3. How are examples selected at runtime?
4. How are examples formatted in the prompt?
Here are the considerations for each.
#### 1. Generating examples
The first and most important step of few-shot prompting is coming up with a good dataset of examples.
Good examples should be relevant at runtime, clear, informative, and provide information that was not already known to the model.
At a high-level, the basic ways to generate examples are:
- Manual: a person/people generates examples they think are useful.
- Better model: a better (presumably more expensive/slower) model's responses are used as examples for a worse (presumably cheaper/faster) model.
- User feedback: users (or labelers) leave feedback on interactions with the application and examples are generated based on that feedback (for example, all interactions with positive feedback could be turned into examples).
- LLM feedback: same as user feedback but the process is automated by having models evaluate themselves.
Which approach is best depends on your task. For tasks where a small number of core principles need to be understood really well, it can be valuable to hand-craft a few really good examples.
For tasks where the space of correct behaviors is broader and more nuanced, it can be useful to generate many examples in a more automated fashion so that there's a higher likelihood of there being some highly relevant examples for any runtime input.
**Single-turn v.s. multi-turn examples**
Another dimension to think about when generating examples is what the example is actually showing.
The simplest types of examples just have a user input and an expected model output. These are single-turn examples.
A more complex type of example is an entire conversation, usually one in which a model initially responds incorrectly and a user then tells the model how to correct its answer.
This is called a multi-turn example. Multi-turn examples can be useful for more nuanced tasks where it's useful to show common errors and spell out exactly why they're wrong and what should be done instead.
#### 2. Number of examples
Once we have a dataset of examples, we need to think about how many examples should be in each prompt.
The key tradeoff is that more examples generally improve performance, but larger prompts increase costs and latency.
And beyond some threshold having too many examples can start to confuse the model.
Finding the right number of examples is highly dependent on the model, the task, the quality of the examples, and your cost and latency constraints.
Anecdotally, the better the model, the fewer examples it needs to perform well and the more quickly you hit steeply diminishing returns on adding more examples.
But, the best/only way to reliably answer this question is to run some experiments with different numbers of examples.
#### 3. Selecting examples
Assuming we are not adding our entire example dataset into each prompt, we need to have a way of selecting examples from our dataset based on a given input. We can do this:
- Randomly
- By (semantic or keyword-based) similarity of the inputs
- Based on some other constraints, like token size
LangChain has a number of [`ExampleSelectors`](/docs/concepts/#example-selectors) which make it easy to use any of these techniques.
Generally, selecting by semantic similarity leads to the best model performance. But how much this matters is again model- and task-specific, and is something worth experimenting with.
#### 4. Formatting examples
Most state-of-the-art models these days are chat models, so we'll focus on formatting examples for those. Our basic options are to insert the examples:
- In the system prompt as a string
- As their own messages
If we insert our examples into the system prompt as a string, we'll need to make sure it's clear to the model where each example begins and which parts are the input versus output. Different models respond better to different syntaxes, like [ChatML](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chat-markup-language), XML, TypeScript, etc.
If we insert our examples as messages, where each example is represented as a sequence of Human, AI messages, we might want to also assign [names](/docs/concepts/#messages) to our messages like `"exampleUser"` and `"exampleAssistant"` to make it clear that these messages correspond to different actors than the latest input message.
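For example, here's a small sketch of inserting examples as their own messages in a chat prompt:
```ts
import { ChatPromptTemplate } from "@langchain/core/prompts";

// Few-shot examples inserted as alternating human/AI messages before the real input.
const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You translate English to French."],
  ["human", "Hello!"],
  ["ai", "Bonjour !"],
  ["human", "How are you?"],
  ["ai", "Comment allez-vous ?"],
  ["human", "{input}"],
]);

const messages = await prompt.formatMessages({ input: "See you tomorrow!" });
console.log(messages);
```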
**Formatting tool call examples**
One area where formatting examples as messages can be tricky is when our example outputs have tool calls. This is because different models have different constraints on what types of message sequences are allowed when any tool calls are generated.
- Some models require that any AIMessage with tool calls be immediately followed by ToolMessages for every tool call,
- Some models additionally require that any ToolMessages be immediately followed by an AIMessage before the next HumanMessage,
- Some models require that tools are passed in to the model if there are any tool calls / ToolMessages in the chat history.
These requirements are model-specific and should be checked for the model you are using. If your model requires ToolMessages after tool calls and/or AIMessages after ToolMessages and your examples only include expected tool calls and not the actual tool outputs, you can try adding dummy ToolMessages / AIMessages to the end of each example with generic contents to satisfy the API constraints.
In these cases it's especially worth experimenting with inserting your examples as strings versus messages, as having dummy messages can adversely affect certain models.
You can see a case study of how Anthropic and OpenAI respond to different few-shot prompting techniques on two different tool calling benchmarks [here](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance/).
### Retrieval
LLMs are trained on a large but fixed dataset, limiting their ability to reason over private or recent information. Fine-tuning an LLM with specific facts is one way to mitigate this, but is often [poorly suited for factual recall](https://www.anyscale.com/blog/fine-tuning-is-for-form-not-facts) and [can be costly](https://www.glean.com/blog/how-to-build-an-ai-assistant-for-the-enterprise).
Retrieval is the process of providing relevant information to an LLM to improve its response for a given input. Retrieval augmented generation (RAG) is the process of grounding the LLM generation (output) using the retrieved information.
:::tip
- See our RAG from Scratch [video series](https://youtube.com/playlist?list=PLfaIDFEXuae2LXbO1_PKyVJiQ23ZztA0x&feature=shared).
The code examples are in Python, but the series is useful for a general overview of RAG concepts for visual learners.
- For a high-level guide on retrieval, see this [tutorial on RAG](/docs/tutorials/rag/).
:::
RAG is only as good as the retrieved documents’ relevance and quality. Fortunately, an emerging set of techniques can be employed to design and improve RAG systems. We've focused on taxonomizing and summarizing many of these techniques (see below figure) and will share some high-level strategic guidance in the following sections.
You can and should experiment with using different pieces together. You might also find [this LangSmith guide](https://docs.smith.langchain.com/how_to_guides/evaluation/evaluate_llm_application) useful for showing how to evaluate different iterations of your app.

#### Query Translation
First, consider the user input(s) to your RAG system. Ideally, a RAG system can handle a wide range of inputs, from poorly worded questions to complex multi-part queries.
**Using an LLM to review and optionally modify the input is the central idea behind query translation.** This serves as a general buffer, optimizing raw user inputs for your retrieval system.
For example, this can be as simple as extracting keywords or as complex as generating multiple sub-questions for a complex query.
| Name | When to use | Description |
| --------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [Multi-query](/docs/how_to/multiple_queries/) | When you need to cover multiple perspectives of a question. | Rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, return the unique documents for all queries. |
| [Decomposition (Python cookbook)](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a question can be broken down into smaller subproblems. | Decompose a question into a set of subproblems / questions, which can either be solved sequentially (use the answer from first + retrieval to answer the second) or in parallel (consolidate each answer into final answer). |
| [Step-back (Python cookbook)](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a higher-level conceptual understanding is required. | First prompt the LLM to ask a generic step-back question about higher-level concepts or principles, and retrieve relevant facts about them. Use this grounding to help answer the user question. |
| [HyDE (Python cookbook)](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | If you have challenges retrieving relevant documents using the raw user inputs. | Use an LLM to convert questions into hypothetical documents that answer the question. Use the embedded hypothetical documents to retrieve real documents with the premise that doc-doc similarity search can produce more relevant matches. |
:::tip
See our Python RAG from Scratch videos for a few different specific approaches:
- [Multi-query](https://youtu.be/JChPi0CRnDY?feature=shared)
- [Decomposition](https://youtu.be/h0OPWlEOank?feature=shared)
- [Step-back](https://youtu.be/xn1jEjRyJ2U?feature=shared)
- [HyDE](https://youtu.be/SaDzIVkYqyY?feature=shared)
:::
#### Routing
Second, consider the data sources available to your RAG system. You may want to query across more than one database, or across structured and unstructured data sources. **Using an LLM to review the input and route it to the appropriate data source is a simple and effective approach for querying across sources.**
| Name | When to use | Description |
| ------------------------------------------------------------------------ | ----------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- |
| [Logical routing](/docs/how_to/routing/) | When you can prompt an LLM with rules to decide where to route the input. | Logical routing can use an LLM to reason about the query and choose which datastore is most appropriate. |
| [Semantic routing](/docs/how_to/routing/#routing-by-semantic-similarity) | When semantic similarity is an effective way to determine where to route the input. | Semantic routing embeds both the query and, typically, a set of prompts. It then chooses the appropriate prompt based on similarity. |
:::tip
See our Python RAG from Scratch video on [routing](https://youtu.be/pfpIndq7Fi8?feature=shared).
:::
#### Query Construction
Third, consider whether any of your data sources require specific query formats. Many structured databases use SQL. Vector stores often have specific syntax for applying keyword filters to document metadata. **Using an LLM to convert a natural language query into a query syntax is a popular and powerful approach.**
In particular, [text-to-SQL](/docs/tutorials/sql_qa/), [text-to-Cypher](/docs/tutorials/graph/), and [query analysis for metadata filters](/docs/tutorials/query_analysis/#query-analysis) are useful ways to interact with structured, graph, and vector databases respectively.
| Name | When to Use | Description |
| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [Text to SQL](/docs/tutorials/sql_qa/) | If users are asking questions that require information housed in a relational database, accessible via SQL. | This uses an LLM to transform user input into a SQL query. |
| [Text-to-Cypher](/docs/tutorials/graph/) | If users are asking questions that require information housed in a graph database, accessible via Cypher. | This uses an LLM to transform user input into a Cypher query. |
| [Self Query](/docs/how_to/self_query/) | If users are asking questions that are better answered by fetching documents based on metadata rather than similarity with the text. | This uses an LLM to transform user input into two things: (1) a string to look up semantically, (2) a metadata filter to go along with it. This is useful because oftentimes questions are about the METADATA of documents (not the content itself). |
:::tip
See our [blog post overview](https://blog.langchain.dev/query-construction/) and RAG from Scratch video on [query construction](https://youtu.be/kl6NwWYxvbM?feature=shared), the process of text-to-DSL where DSL is a domain specific language required to interact with a given database. This converts user questions into structured queries.
:::
#### Indexing
Fourth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/docs/concepts/#embedding-models).
Many RAG approaches focus on splitting documents into chunks and retrieving some number based on similarity to an input question for the LLM. But chunk size and chunk number can be difficult to set and affect results if they do not provide full context for the LLM to answer a question. Furthermore, LLMs are increasingly capable of processing millions of tokens.
:::tip
We're unifying model params across all packages. We now suggest using `model` instead of `modelName`, and `apiKey` for API keys.
:::
---
sidebar_position: 1
sidebar_label: v0.2
---
# LangChain v0.2
LangChain v0.2 was released in May 2024. This release includes a number of breaking changes and deprecations. This document contains a guide on upgrading to 0.2.x, as well as a list of deprecations and breaking changes.
:::note Reference
- [Migrating to Astream Events v2](/docs/versions/v0_2/migrating_astream_events)
:::
## Migration
This documentation will help you upgrade your code to LangChain `0.2.x`. To prepare for migration, we first recommend you take the following steps:
1. Install the 0.2.x versions of `@langchain/core` and `langchain`, and upgrade to recent versions of other packages that you may be using (e.g. `@langchain/langgraph`, `@langchain/community`, `@langchain/openai`, etc.)
2. Verify that your code runs properly with the new packages (e.g., unit tests pass)
3. Install a recent version of `@langchain/scripts`, and use the tool to replace old imports used by your code with the new imports. (See instructions below.)
4. Manually resolve any remaining deprecation warnings
5. Re-run unit tests
### Upgrade to new imports
We created a tool to help migrate your code. This tool is still in **beta** and may not cover all cases, but
we hope that it will help you migrate your code more quickly.
The migration script has the following limitations:
1. It's limited to helping users move from old imports to new imports. It doesn't help address other deprecations.
2. It can't handle imports that involve `as`.
3. New imports are always placed in the global scope, even if the old import that was replaced was located inside some local scope (e.g., a function body).
4. It will likely miss some deprecated imports.
Here is an example of the import changes that the migration script can help apply automatically:
| From Package | To Package | Deprecated Import | New Import |
| ---------------------- | -------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------------- |
| `langchain` | `@langchain/community` | `import { UpstashVectorStore } from "langchain/vectorstores/upstash"` | `import { UpstashVectorStore } from "@langchain/community/vectorstores/upstash"` |
| `@langchain/community` | `@langchain/openai` | `import { ChatOpenAI } from "@langchain/community/chat_models/openai"` | `import { ChatOpenAI } from "@langchain/openai"` |
| `langchain` | `@langchain/core` | `import { Document } from "langchain/schema/document"` | `import { Document } from "@langchain/core/documents"` |
| `langchain` | `@langchain/textsplitters` | `import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"` | `import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"` |
#### Deprecation timeline
We have two main types of deprecations:
1. Code that was moved from `langchain` into another package (e.g., `@langchain/community`).
   If you try to import it from `langchain`, it will fail since the entrypoint has been removed.
2. Code that has better alternatives available and will eventually be removed, so that there's only a single way to do things (e.g., the `predictMessages` method on chat models has been deprecated in favor of `invoke`).
Many of these were marked for removal in 0.2. We have bumped the removal to 0.3.
#### Installation
:::note
The 0.2.X migration script is only available in version `0.0.14-rc.1` or later.
:::
```bash npm2yarn
npm i @langchain/scripts@0.0.14-rc.1
```
#### Usage
Given that the migration script is not perfect, you should make sure you have a backup of your code first (e.g., using version control like `git`).
For example, say your code still uses `import { ChatOpenAI } from "@langchain/community/chat_models/openai";`.
Invoking the migration script will replace this import with `import { ChatOpenAI } from "@langchain/openai";`.
```typescript
import { updateEntrypointsFrom0_x_xTo0_2_x } from "@langchain/scripts/migrations";
const pathToMyProject = "..."; // This path is used in the following glob pattern: `${projectPath}/**/*.{ts,tsx,js,jsx}`.
updateEntrypointsFrom0_x_xTo0_2_x({
projectPath: pathToMyProject,
shouldLog: true,
});
```
#### Other options
```typescript
updateEntrypointsFrom0_x_xTo0_2_x({
projectPath: pathToMyProject,
tsConfigPath: "tsconfig.json", // Path to the tsConfig file. This will be used to load all the project files into the script.
testRun: true, // If true, the script will not save any changes, but will log the changes that would be made.
files: ["..."], // A list of .ts file paths to check. If this is provided, the script will only check these files.
});
```
{
"cells": [
{
"cell_type": "markdown",
"id": "ce8457ed-c0b1-4a74-abbd-9d3d2211270f",
"metadata": {},
"source": [
"# Migrating off ConversationTokenBufferMemory\n",
"\n",
"Follow this guide if you're trying to migrate off one of the old memory classes listed below:\n",
"\n",
"\n",
"| Memory Type | Description |\n",
"|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n",
"| `ConversationTokenBufferMemory` | Keeps only the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |\n",
"\n",
"`ConversationTokenBufferMemory` applies additional processing on top of the raw conversation history to trim the conversation history to a size that fits inside the context window of a chat model. \n",
"\n",
"This processing functionality can be accomplished using LangChain's built-in [trimMessages](https://api.js.langchain.com/functions/_langchain_core.messages.trimMessages.html) function."
]
},
{
"cell_type": "markdown",
"id": "79935247-acc7-4a05-a387-5d72c9c8c8cb",
"metadata": {},
"source": [
"```{=mdx}\n",
":::important\n",
"\n",
"We’ll begin by exploring a straightforward method that involves applying processing logic to the entire conversation history.\n",
"\n",
"While this approach is easy to implement, it has a downside: as the conversation grows, so does the latency, since the logic is re-applied to all previous exchanges in the conversation at each turn.\n",
"\n",
"More advanced strategies focus on incrementally updating the conversation history to avoid redundant processing.\n",
"\n",
"For instance, the LangGraph [how-to guide on summarization](https://langchain-ai.github.io/langgraphjs/how-tos/add-summary-conversation-history/) demonstrates\n",
"how to maintain a running summary of the conversation while discarding older messages, ensuring they aren't re-processed during later turns.\n",
":::\n",
"```"
]
},
{
"cell_type": "markdown",
"id": "d07f9459-9fb6-4942-99c9-64558aedd7d4",
"metadata": {},
"source": [
"## Set up\n",
"\n",
"### Dependencies\n",
"\n",
"```{=mdx}\n",
"import Npm2Yarn from \"@theme/Npm2Yarn\"\n",
"\n",
"<Npm2Yarn>\n",
" @langchain/openai @langchain/core zod\n",
"</Npm2Yarn>\n",
"```\n",
"\n",
"### Environment variables\n",
"\n",
"```typescript\n",
"process.env.OPENAI_API_KEY = \"YOUR_OPENAI_API_KEY\";\n",
"```\n",
"\n",
"```{=mdx}\n",
"<details open>\n",
"```"
]
},
{
"cell_type": "markdown",
"id": "7ce2d951",
"metadata": {},
"source": [
"## Reimplementing ConversationTokenBufferMemory logic\n",
"\n",
"Here, we'll use `trimMessages` to keeps the system message and the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "e1550bee",
"metadata": {},
"outputs": [],
"source": [
"import {\n",
" AIMessage,\n",
" HumanMessage,\n",
" SystemMessage,\n",
"} from \"@langchain/core/messages\";\n",
"\n",
"const messages = [\n",
" new SystemMessage(\"you're a good assistant, you always respond with a joke.\"),\n",
" new HumanMessage(\"i wonder why it's called langchain\"),\n",
" new AIMessage(\n",
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
" ),\n",
" new HumanMessage(\"and who is harrison chasing anyways\"),\n",
" new AIMessage(\n",
" \"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"\n",
" ),\n",
" new HumanMessage(\"why is 42 always the answer?\"),\n",
" new AIMessage(\n",
" \"Because it's the only number that's constantly right, even when it doesn't add up!\"\n",
" ),\n",
" new HumanMessage(\"What did the cow say?\"),\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "6442f74b-2c36-48fd-a3d1-c7c5d18c050f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"SystemMessage {\n",
" \"content\": \"you're a good assistant, you always respond with a joke.\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {}\n",
"}\n",
"HumanMessage {\n",
" \"content\": \"and who is harrison chasing anyways\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {}\n",
"}\n",
"AIMessage {\n",
" \"content\": \"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {},\n",
" \"tool_calls\": [],\n",
" \"invalid_tool_calls\": []\n",
"}\n",
"HumanMessage {\n",
" \"content\": \"why is 42 always the answer?\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {}\n",
"}\n",
"AIMessage {\n",
" \"content\": \"Because it's the only number that's constantly right, even when it doesn't add up!\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {},\n",
" \"tool_calls\": [],\n",
" \"invalid_tool_calls\": []\n",
"}\n",
"HumanMessage {\n",
" \"content\": \"What did the cow say?\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {}\n",
"}\n"
]
}
],
"source": [
"import { trimMessages } from \"@langchain/core/messages\";\n",
"import { ChatOpenAI } from \"@langchain/openai\";\n",
"\n",
"const selectedMessages = await trimMessages(\n",
" messages,\n",
" {\n",
" // Please see API reference for trimMessages for other ways to specify a token counter.\n",
" tokenCounter: new ChatOpenAI({ model: \"gpt-4o\" }),\n",
" maxTokens: 80, // <-- token limit\n",
" // The startOn is specified\n",
" // to make sure we do not generate a sequence where\n",
" // a ToolMessage that contains the result of a tool invocation\n",
" // appears before the AIMessage that requested a tool invocation\n",
" // as this will cause some chat models to raise an error.\n",
" startOn: \"human\",\n",
" strategy: \"last\",\n",
" includeSystem: true, // <-- Keep the system message\n",
" }\n",
")\n",
"\n",
"for (const msg of selectedMessages) {\n",
" console.log(msg);\n",
"}"
]
},
{
"cell_type": "markdown",
"id": "0f05d272-2d22-44b7-9fa6-e617a48584b4",
"metadata": {},
"source": [
"```{=mdx}\n",
"</details>\n",
"```\n",
"\n",
"## Modern usage with LangGraph\n",
# INVALID_PROMPT_INPUT
A [prompt template](/docs/concepts#prompt-templates) received missing or invalid input variables.
One unexpected way this can occur is if you add a JSON object directly into a prompt template:
```ts
import { PromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
const prompt = PromptTemplate.fromTemplate(`You are a helpful assistant.
Here is an example of how you should respond:
{
"firstName": "John",
"lastName": "Doe",
"age": 21
}
Now, answer the following question:
{question}`);
```
You might think that the above prompt template should require a single input key named `question`, but the JSON object will be
interpreted as additional variables because the curly braces (`{`) are not escaped. To escape them, each brace should be doubled (`{{` and `}}`), like this:
```ts
import { PromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
const prompt = PromptTemplate.fromTemplate(`You are a helpful assistant.
Here is an example of how you should respond:
{{
"firstName": "John",
"lastName": "Doe",
"age": 21
}}
Now, answer the following question:
{question}`);
```
## Troubleshooting
The following may help resolve this error:
- Double-check your prompt template to ensure that it is correct.
- If you are using default formatting and you are using curly braces `{` anywhere in your template, they should be double escaped like this: `{{`, as shown above.
- If you are using a [`MessagesPlaceholder`](/docs/concepts/#messagesplaceholder), make sure that you are passing in an array of messages or message-like objects.
- If you are using shorthand tuples to declare your prompt template, make sure that the variable name is wrapped in curly braces (`["placeholder", "{messages}"]`).
- Try viewing the inputs into your prompt template using [LangSmith](https://docs.smith.langchain.com/) or log statements to confirm they appear as expected.
- If you are pulling a prompt from the [LangChain Prompt Hub](https://smith.langchain.com/prompts), try pulling and logging it or running it in isolation with a sample input to confirm that it is what you expect.
| |
145251
|
# MESSAGE_COERCION_FAILURE
Several modules in LangChain take [`MessageLike`](https://api.js.langchain.com/types/_langchain_core.messages.BaseMessageLike.html)
objects in place of formal [`BaseMessage`](/docs/concepts#message-types) classes. These include OpenAI style message objects (`{ role: "user", content: "Hello world!" }`),
tuples, and plain strings (which are converted to [`HumanMessages`](/docs/concepts#humanmessage)).
If one of these modules receives a value outside of one of these formats, you will receive an error like the following:
```ts
const badlyFormattedMessageObject = {
role: "foo",
randomNonContentValue: "bar",
};
await model.invoke([badlyFormattedMessageObject]);
```
```
Error: Unable to coerce message from array: only human, AI, system, or tool message coercion is currently supported.
Received: {
"role": "foo",
"randomNonContentValue": "bar",
}
```
## Troubleshooting
The following may help resolve this error:
- Ensure that all inputs to chat models are an array of LangChain message classes or a supported message-like.
- Check that there is no stringification or other unexpected transformation occurring.
- Check the error's stack trace and add log or debugger statements.
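For reference, here is a sketch of inputs that will successfully coerce into messages (the model name is just an illustrative assumption):
```ts
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";

const model = new ChatOpenAI({ model: "gpt-4o-mini" });

// Each element below is a supported message-like value and will be coerced
// into a proper message before being sent to the model.
await model.invoke([
  { role: "user", content: "Hello world!" }, // OpenAI-style message object
  new HumanMessage("Hello world!"), // formal LangChain message class
  ["human", "Hello world!"], // (role, content) tuple
  "Hello world!", // plain string, converted to a HumanMessage
]);
```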
| |
145258
|
# OUTPUT_PARSING_FAILURE
An [output parser](/docs/concepts#output-parsers) was unable to handle model output as expected.
To illustrate this, let's say you have an output parser that expects a chat model to output JSON surrounded by a markdown code tag (triple backticks). Here is an example of good input:
````ts
AIMessage {
content: "```\n{\"foo\": \"bar\"}\n```"
}
````
Internally, our output parser might try to strip out the markdown fence and newlines and then run `JSON.parse()`.
If instead the chat model generated an output with malformed JSON like this:
````ts
AIMessage {
content: "```\n{\"foo\":\n```"
}
````
Then, when our output parser attempts to parse it, the `JSON.parse()` call will fail.
Note that some prebuilt constructs like [legacy LangChain agents](/docs/how_to/agent_executor) and chains may use output parsers internally,
so you may see this error even if you're not visibly instantiating and using an output parser.
## Troubleshooting
The following may help resolve this error:
- Consider using [tool calling or other structured output techniques](/docs/how_to/structured_output/) if possible without an output parser to reliably output parseable values.
- If you are using a prebuilt chain or agent, use [LangGraph](https://langchain-ai.github.io/langgraphjs/) to compose your logic explicitly instead.
- Add more precise formatting instructions to your prompt. In the above example, adding `"You must always return valid JSON fenced by a markdown code block. Do not return any additional text."` to your input may help steer the model to returning the expected format.
- If you are using a smaller or less capable model, try using a more capable one.
- Add [LLM-powered retries](/docs/how_to/output_parser_fixing/).
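As an illustration of the first suggestion, here is a rough sketch of replacing a JSON output parser with structured output via tool calling (the model name and schema are assumptions for the example):
```ts
import { z } from "zod";
import { ChatOpenAI } from "@langchain/openai";

const model = new ChatOpenAI({ model: "gpt-4o-mini" });

// The model is constrained to return an object matching this schema,
// so there is no free-form JSON for an output parser to mis-parse.
const structuredModel = model.withStructuredOutput(
  z.object({
    foo: z.string().describe("An example string value"),
  })
);

const result = await structuredModel.invoke("Return a value for foo.");
console.log(result); // e.g. { foo: "bar" }
```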
| |
145259
|
# MODEL_AUTHENTICATION
Your model provider is denying you access to their service.
## Troubleshooting
The following may help resolve this error:
- Confirm that your API key or other credentials are correct.
- If you are relying on an environment variable to authenticate, confirm that the variable name is correct and that it has a value set.
- Note that some environments, like Cloudflare Workers, do not support environment variables.
- For some models, you can try explicitly passing an `apiKey` parameter to rule out any environment variable issues like this:
```ts
const model = new ChatOpenAI({
apiKey: "YOUR_KEY_HERE",
});
```
- If you are using a proxy or other custom endpoint, make sure that your custom provider does not expect an alternative authentication scheme.
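If you suspect an environment variable issue, a quick sanity check like the following sketch can help (the variable name assumes OpenAI; adjust it for your provider):
```ts
import { ChatOpenAI } from "@langchain/openai";

// Confirm the variable is actually visible to your runtime before
// investigating further on the provider side.
if (!process.env.OPENAI_API_KEY) {
  throw new Error("OPENAI_API_KEY is not set in this environment.");
}

const model = new ChatOpenAI({
  // Passing the key explicitly rules out environment variable loading issues.
  apiKey: process.env.OPENAI_API_KEY,
});
```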
| |
145262
|
# Few Shot Prompt Templates
Few shot prompting is a technique that provides a Large Language Model (LLM) with a list of examples and then asks it to generate text that follows the lead of those examples.
An example of this is the following:
Say you want your LLM to respond in a specific format. You can few shot prompt the LLM with a list of question-answer pairs so it knows what format to respond in.
```txt
Respond to the user's question in the following format:
Question: What is your name?
Answer: My name is John.
Question: What is your age?
Answer: I am 25 years old.
Question: What is your favorite color?
Answer:
```
Here we left the last `Answer:` blank so the LLM can fill it in. The LLM will then generate something like the following:
```txt
Answer: I don't have a favorite color; I don't have preferences.
```
### Use Case
In the following example, we few shot the LLM to rephrase questions into more general queries.
We provide two examples, each pairing a specific question with a rephrased, more general version. The `FewShotChatMessagePromptTemplate` will use our examples, and when `.format` is called we'll see those examples formatted so they can be passed to the LLM.
```typescript
import {
ChatPromptTemplate,
FewShotChatMessagePromptTemplate,
} from "langchain/prompts";
```
```typescript
const examples = [
{
input: "Could the members of The Police perform lawful arrests?",
output: "what can the members of The Police do?",
},
{
input: "Jan Sindel's was born in what country?",
output: "what is Jan Sindel's personal history?",
},
];
const examplePrompt = ChatPromptTemplate.fromTemplate(`Human: {input}
AI: {output}`);
const fewShotPrompt = new FewShotChatMessagePromptTemplate({
examplePrompt,
examples,
inputVariables: [], // no input variables
});
```
```typescript
const formattedPrompt = await fewShotPrompt.format({});
console.log(formattedPrompt);
```
```typescript
[
HumanMessage {
lc_namespace: [ 'langchain', 'schema' ],
content: 'Human: Could the members of The Police perform lawful arrests?\n' +
'AI: what can the members of The Police do?',
additional_kwargs: {}
},
HumanMessage {
lc_namespace: [ 'langchain', 'schema' ],
content: "Human: Jan Sindel's was born in what country?\n" +
"AI: what is Jan Sindel's personal history?",
additional_kwargs: {}
}
]
```
Then, if we use this with another question, the LLM will rephrase the question the way we want.
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
<IntegrationInstallTooltip></IntegrationInstallTooltip>
```bash npm2yarn
npm install @langchain/openai @langchain/core
```
```typescript
import { ChatOpenAI } from "@langchain/openai";
```
```typescript
const model = new ChatOpenAI({});
const examples = [
{
input: "Could the members of The Police perform lawful arrests?",
output: "what can the members of The Police do?",
},
{
input: "Jan Sindel's was born in what country?",
output: "what is Jan Sindel's personal history?",
},
];
const examplePrompt = ChatPromptTemplate.fromTemplate(`Human: {input}
AI: {output}`);
const fewShotPrompt = new FewShotChatMessagePromptTemplate({
prefix:
"Rephrase the users query to be more general, using the following examples",
suffix: "Human: {input}",
examplePrompt,
examples,
inputVariables: ["input"],
});
const formattedPrompt = await fewShotPrompt.format({
input: "What's France's main city?",
});
const response = await model.invoke(formattedPrompt);
console.log(response);
```
```typescript
AIMessage {
lc_namespace: [ 'langchain', 'schema' ],
content: 'What is the capital of France?',
additional_kwargs: { function_call: undefined }
}
```
### Few Shotting With Functions
You can also partial a prompt with a function. This is useful when you have a variable that you always want to fetch in a common way, such as the current date or time. You can't hard-code the current date in the prompt, and passing it along with the other input variables is tedious, so it's handy to partial the prompt with a function that always returns the current date.
```typescript
import { PromptTemplate } from "langchain/prompts";

const getCurrentDate = () => {
  return new Date().toISOString();
};

const prompt = new PromptTemplate({
  template: "Tell me a {adjective} joke about the day {date}",
  inputVariables: ["adjective", "date"],
});

const partialPrompt = await prompt.partial({
  date: getCurrentDate,
});

const formattedPrompt = await partialPrompt.format({
  adjective: "funny",
});

console.log(formattedPrompt);
// Tell me a funny joke about the day 2023-07-13T00:54:59.287Z
```
### Few Shot vs Chat Few Shot
The chat and non-chat few shot prompt templates behave similarly. The example below demonstrates both and highlights the differences in their outputs.
```typescript
import {
  PromptTemplate,
  ChatPromptTemplate,
  FewShotPromptTemplate,
  FewShotChatMessagePromptTemplate,
} from "langchain/prompts";
```
```typescript
const examples = [
{
input: "Could the members of The Police perform lawful arrests?",
output: "what can the members of The Police do?",
},
{
input: "Jan Sindel's was born in what country?",
output: "what is Jan Sindel's personal history?",
},
];
const prompt = `Human: {input}
AI: {output}`;
const examplePromptTemplate = PromptTemplate.fromTemplate(prompt);
const exampleChatPromptTemplate = ChatPromptTemplate.fromTemplate(prompt);
const chatFewShotPrompt = new FewShotChatMessagePromptTemplate({
examplePrompt: exampleChatPromptTemplate,
examples,
inputVariables: [], // no input variables
});
const fewShotPrompt = new FewShotPromptTemplate({
examplePrompt: examplePromptTemplate,
examples,
inputVariables: [], // no input variables
});
```
```typescript
console.log("Chat Few Shot: ", await chatFewShotPrompt.formatMessages({}));
/**
Chat Few Shot: [
HumanMessage {
lc_namespace: [ 'langchain', 'schema' ],
content: 'Human: Could the members of The Police perform lawful arrests?\n' +
'AI: what can the members of The Police do?',
additional_kwargs: {}
},
HumanMessage {
lc_namespace: [ 'langchain', 'schema' ],
content: "Human: Jan Sindel's was born in what country?\n" +
"AI: what is Jan Sindel's personal history?",
additional_kwargs: {}
}
]
*/
```
```typescript
console.log("Few Shot: ", await fewShotPrompt.formatPromptValue({}));
/**
Few Shot:
Human: Could the members of The Police perform lawful arrests?
AI: what can the members of The Police do?
Human: Jan Sindel's was born in what country?
AI: what is Jan Sindel's personal history?
*/
```
Here we can see the main distinction between `FewShotChatMessagePromptTemplate` and `FewShotPromptTemplate`: their input and output types.
`FewShotChatMessagePromptTemplate` takes a `ChatPromptTemplate` for formatting its examples, and its output is a list of `BaseMessage` instances.
`FewShotPromptTemplate`, on the other hand, takes a `PromptTemplate` for formatting its examples, and its output is a string.
## With Non Chat Models
LangChain also provides a class for few shot prompt formatting for non chat models: `FewShotPromptTemplate`. The API is largely the same, but the output is formatted differently (chat messages vs strings).
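As a quick sketch (reusing the example format above, with an illustrative prefix and suffix), a `FewShotPromptTemplate` formats everything down to a single string:
```typescript
import { PromptTemplate, FewShotPromptTemplate } from "langchain/prompts";

const stringExamplePrompt = PromptTemplate.fromTemplate(`Human: {input}
AI: {output}`);

const stringFewShotPrompt = new FewShotPromptTemplate({
  prefix: "Rephrase the user's query to be more general, using the following examples",
  suffix: "Human: {input}",
  examplePrompt: stringExamplePrompt,
  examples: [
    {
      input: "Could the members of The Police perform lawful arrests?",
      output: "what can the members of The Police do?",
    },
  ],
  inputVariables: ["input"],
});

// The result is a single string containing the prefix, the formatted example,
// and the suffix, ready to pass to a non chat (text completion) model.
console.log(await stringFewShotPrompt.format({ input: "What's France's main city?" }));
```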
### Partials With Functions
```typescript
import {
  PromptTemplate,
  FewShotPromptTemplate,
} from "langchain/prompts";
```
```typescript
const examplePrompt = PromptTemplate.fromTemplate("{foo}{bar}");
const prompt = new FewShotPromptTemplate({
prefix: "{foo}{bar}",
examplePrompt,
inputVariables: ["foo", "bar"],
});
const partialPrompt = await prompt.partial({
foo: () => Promise.resolve("boo"),
});
const formatted = await partialPrompt.format({ bar: "baz" });
console.log(formatted);
```
```txt
boobaz\n
```
### With Functions and Example Selector
```typescript
import {
  PromptTemplate,
  FewShotPromptTemplate,
  LengthBasedExampleSelector,
} from "langchain/prompts";
```
| |
145263
|
```typescript
const examplePrompt = PromptTemplate.fromTemplate("An example about {x}");
const exampleSelector = await LengthBasedExampleSelector.fromExamples(
[{ x: "foo" }, { x: "bar" }],
{ examplePrompt, maxLength: 200 }
);
const prompt = new FewShotPromptTemplate({
prefix: "{foo}{bar}",
exampleSelector,
examplePrompt,
inputVariables: ["foo", "bar"],
});
const partialPrompt = await prompt.partial({
foo: () => Promise.resolve("boo"),
});
const formatted = await partialPrompt.format({ bar: "baz" });
console.log(formatted);
```
```txt
boobaz
An example about foo
An example about bar
```
| |
145278
|
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to do retrieval\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following:\n",
"\n",
"- [Chatbots](/docs/tutorials/chatbot)\n",
"- [Retrieval-augmented generation](/docs/tutorials/rag)\n",
"\n",
":::\n",
"\n",
"Retrieval is a common technique chatbots use to augment their responses with data outside a chat model’s training data. This section will cover how to implement retrieval in the context of chatbots, but it’s worth noting that retrieval is a very subtle and deep topic.\n",
"\n",
"## Setup\n",
"\n",
"You’ll need to install a few packages, and set any LLM API keys:\n",
"\n",
"```{=mdx}\n",
"import Npm2Yarn from \"@theme/Npm2Yarn\";\n",
"\n",
"<Npm2Yarn>\n",
" @langchain/openai @langchain/core cheerio\n",
"</Npm2Yarn>\n",
"```\n",
"\n",
"Let’s also set up a chat model that we’ll use for the below examples.\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\" />\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"// @lc-docs-hide-cell\n",
"\n",
"import { ChatOpenAI } from \"@langchain/openai\";\n",
"\n",
"const llm = new ChatOpenAI({\n",
" model: \"gpt-4o\",\n",
" temperature: 0,\n",
"});"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Creating a retriever\n",
"\n",
"We’ll use [the LangSmith documentation](https://docs.smith.langchain.com) as source material and store the content in a vectorstore for later retrieval. Note that this example will gloss over some of the specifics around parsing and storing a data source - you can see more [in-depth documentation on creating retrieval systems here](/docs/how_to/#qa-with-rag).\n",
"\n",
"Let’s use a document loader to pull text from the docs:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\u001b[33m36687\u001b[39m"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import \"cheerio\";\n",
"import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n",
"\n",
"const loader = new CheerioWebBaseLoader(\n",
" \"https://docs.smith.langchain.com/user_guide\"\n",
");\n",
"\n",
"const rawDocs = await loader.load();\n",
"\n",
"rawDocs[0].pageContent.length;"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next, we split it into smaller chunks that the LLM’s context window can handle and store it in a vector database:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n",
"\n",
"const textSplitter = new RecursiveCharacterTextSplitter({\n",
" chunkSize: 500,\n",
" chunkOverlap: 0,\n",
"});\n",
"\n",
"const allSplits = await textSplitter.splitDocuments(rawDocs);"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Then we embed and store those chunks in a vector database:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"import { OpenAIEmbeddings } from \"@langchain/openai\";\n",
"import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n",
"\n",
"const vectorstore = await MemoryVectorStore.fromDocuments(\n",
" allSplits,\n",
" new OpenAIEmbeddings()\n",
");"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And finally, let’s create a retriever from our initialized vectorstore:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[\n",
" Document {\n",
" pageContent: \"These test cases can be uploaded in bulk, created on the fly, or exported from application traces. L\"... 294 more characters,\n",
" metadata: {\n",
" source: \"https://docs.smith.langchain.com/user_guide\",\n",
" loc: { lines: { from: 7, to: 7 } }\n",
" }\n",
" },\n",
" Document {\n",
" pageContent: \"We provide native rendering of chat messages, functions, and retrieve documents.Initial Test SetWhi\"... 347 more characters,\n",
" metadata: {\n",
" source: \"https://docs.smith.langchain.com/user_guide\",\n",
" loc: { lines: { from: 6, to: 6 } }\n",
" }\n",
" },\n",
" Document {\n",
" pageContent: \"will help in curation of test cases that can help track regressions/improvements and development of \"... 393 more characters,\n",
" metadata: {\n",
" source: \"https://docs.smith.langchain.com/user_guide\",\n",
" loc: { lines: { from: 11, to: 11 } }\n",
" }\n",
" },\n",
" Document {\n",
" pageContent: \"that time period — this is especially handy for debugging production issues.LangSmith also allows fo\"... 396 more characters,\n",
" metadata: {\n",
" source: \"https://docs.smith.langchain.com/user_guide\",\n",
" loc: { lines: { from: 11, to: 11 } }\n",
" }\n",
" }\n",
"]\n"
]
}
],
"source": [
"const retriever = vectorstore.asRetriever(4);\n",
"\n",
"const docs = await retriever.invoke(\"how can langsmith help with testing?\");\n",
"\n",
"console.log(docs);"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can see that invoking the retriever above results in some parts of the LangSmith docs that contain information about testing that our chatbot can use as context when answering questions. And now we’ve got a retriever that can return related data from the LangSmith docs!\n",
"\n",
"## Document chains\n",
"\n",
"Now that we have a retriever that can return LangChain docs, let’s create a chain that can use them as context to answer questions. We’ll use a `createStuffDocumentsChain` helper function to \"stuff\" all of the input documents into the prompt. It will also handle formatting the docs as strings.\n",
"\n",
"In addition to a chat model, the function also expects a prompt that has a `context` variable, as well as a placeholder for chat history messages named `messages`. We’ll create an appropriate prompt and pass it as shown below:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n",
| |
145286
|
{
"cells": [
{
"cell_type": "raw",
"id": "8165bd4c",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"keywords: [memory]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "f47033eb",
"metadata": {},
"source": [
"# How to add message history\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Chaining runnables](/docs/how_to/sequence/)\n",
"- [Prompt templates](/docs/concepts/#prompt-templates)\n",
"- [Chat Messages](/docs/concepts/#message-types)\n",
"\n",
":::\n",
"\n",
"```{=mdx}\n",
":::note\n",
"\n",
"This guide previously covered the [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html) abstraction. You can access this version of the guide in the [v0.2 docs](https://js.langchain.com/v0.2/docs/how_to/message_history/).\n",
"\n",
"The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n",
"\n",
":::\n",
"```\n",
"\n",
"\n",
"Passing conversation state into and out a chain is vital when building a chatbot. LangGraph implements a built-in persistence layer, allowing chain states to be automatically persisted in memory, or external backends such as SQLite, Postgres or Redis. Details can be found in the LangGraph persistence documentation.\n",
"\n",
"In this guide we demonstrate how to add persistence to arbitrary LangChain runnables by wrapping them in a minimal LangGraph application. This lets us persist the message history and other elements of the chain's state, simplifying the development of multi-turn applications. It also supports multiple threads, enabling a single application to interact separately with multiple users.\n",
"\n",
"## Setup\n",
"\n",
"```{=mdx}\n",
"import Npm2Yarn from \"@theme/Npm2Yarn\";\n",
"\n",
"<Npm2Yarn>\n",
" @langchain/core @langchain/langgraph\n",
"</Npm2Yarn>\n",
"```\n",
"\n",
"Let’s also set up a chat model that we’ll use for the below examples.\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\" />\n",
"```\n"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "8a4e4708",
"metadata": {},
"outputs": [],
"source": [
"// @lc-docs-hide-cell\n",
"\n",
"import { ChatOpenAI } from \"@langchain/openai\";\n",
"\n",
"const llm = new ChatOpenAI({\n",
" model: \"gpt-4o\",\n",
" temperature: 0,\n",
"});"
]
},
{
"cell_type": "markdown",
"id": "1f6121bc-2080-4ccc-acf0-f77de4bc951d",
"metadata": {},
"source": [
"## Example: message inputs\n",
"\n",
"Adding memory to a [chat model](/docs/concepts/#chat-models) provides a simple example. Chat models accept a list of messages as input and output a message. LangGraph includes a built-in `MessagesState` that we can use for this purpose.\n",
"\n",
"Below, we:\n",
"1. Define the graph state to be a list of messages;\n",
"2. Add a single node to the graph that calls a chat model;\n",
"3. Compile the graph with an in-memory checkpointer to store messages between runs.\n",
"\n",
":::info\n",
"\n",
"The output of a LangGraph application is its [state](https://langchain-ai.github.io/langgraphjs/concepts/low_level/).\n",
"\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "f691a73a-a866-4354-9fff-8315605e2b8f",
"metadata": {},
"outputs": [],
"source": [
"import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n",
"\n",
"// Define the function that calls the model\n",
"const callModel = async (state: typeof MessagesAnnotation.State) => {\n",
" const response = await llm.invoke(state.messages);\n",
" // Update message history with response:\n",
" return { messages: response };\n",
"};\n",
"\n",
"// Define a new graph\n",
"const workflow = new StateGraph(MessagesAnnotation)\n",
" // Define the (single) node in the graph\n",
" .addNode(\"model\", callModel)\n",
" .addEdge(START, \"model\")\n",
" .addEdge(\"model\", END);\n",
"\n",
"// Add memory\n",
"const memory = new MemorySaver();\n",
"const app = workflow.compile({ checkpointer: memory });"
]
},
{
"cell_type": "markdown",
"id": "c0b396a8-f81e-4139-b4b2-75adf61d8179",
"metadata": {},
"source": [
"When we run the application, we pass in a configuration object that specifies a `thread_id`. This ID is used to distinguish conversational threads (e.g., between different users)."
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "e4309511-2140-4d91-8f5f-ea3661e6d179",
"metadata": {},
"outputs": [],
"source": [
"import { v4 as uuidv4 } from \"uuid\";\n",
"\n",
"const config = { configurable: { thread_id: uuidv4() } }"
]
},
{
"cell_type": "markdown",
"id": "108c45a2-4971-4120-ba64-9a4305a414bb",
"metadata": {},
"source": [
"We can then invoke the application:"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "72a5ff6c-501f-4151-8dd9-f600f70554be",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"AIMessage {\n",
" \"id\": \"chatcmpl-ABTqCeKnMQmG9IH8dNF5vPjsgXtcM\",\n",
" \"content\": \"Hi Bob! How can I assist you today?\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {\n",
" \"tokenUsage\": {\n",
" \"completionTokens\": 10,\n",
" \"promptTokens\": 12,\n",
" \"totalTokens\": 22\n",
" },\n",
" \"finish_reason\": \"stop\",\n",
" \"system_fingerprint\": \"fp_e375328146\"\n",
" },\n",
" \"tool_calls\": [],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
" \"input_tokens\": 12,\n",
" \"output_tokens\": 10,\n",
" \"total_tokens\": 22\n",
" }\n",
"}\n"
]
}
],
"source": [
"const input = [\n",
" {\n",
" role: \"user\",\n",
" content: \"Hi! I'm Bob.\",\n",
" }\n",
"]\n",
"const output = await app.invoke({ messages: input }, config)\n",
| |
145296
|
" messageLog: [\n",
" AIMessageChunk {\n",
" \"id\": \"chatcmpl-A7eziUrDmLSSMoiOskhrfbsHqx4Sd\",\n",
" \"content\": \"\",\n",
" \"additional_kwargs\": {\n",
" \"tool_calls\": [\n",
" {\n",
" \"index\": 0,\n",
" \"id\": \"call_IQZr1yy2Ug6904VkQg6pWGgR\",\n",
" \"type\": \"function\",\n",
" \"function\": \"[Object]\"\n",
" }\n",
" ]\n",
" },\n",
" \"response_metadata\": {\n",
" \"prompt\": 0,\n",
" \"completion\": 0,\n",
" \"finish_reason\": \"tool_calls\",\n",
" \"system_fingerprint\": \"fp_483d39d857\"\n",
" },\n",
" \"tool_calls\": [\n",
" {\n",
" \"name\": \"magic_function\",\n",
" \"args\": {\n",
" \"input\": 3\n",
" },\n",
" \"id\": \"call_IQZr1yy2Ug6904VkQg6pWGgR\",\n",
" \"type\": \"tool_call\"\n",
" }\n",
" ],\n",
" \"tool_call_chunks\": [\n",
" {\n",
" \"name\": \"magic_function\",\n",
" \"args\": \"{\\\"input\\\":3}\",\n",
" \"id\": \"call_IQZr1yy2Ug6904VkQg6pWGgR\",\n",
" \"index\": 0,\n",
" \"type\": \"tool_call_chunk\"\n",
" }\n",
" ],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
" \"input_tokens\": 61,\n",
" \"output_tokens\": 14,\n",
" \"total_tokens\": 75\n",
" }\n",
" }\n",
" ]\n",
" },\n",
" observation: \"5\"\n",
" }\n",
" ]\n",
"}\n",
"{ output: \"The value of `magic_function(3)` is 5.\" }\n"
]
}
],
"source": [
"const langChainStream = await agentExecutor.stream({ input: query });\n",
"\n",
"for await (const step of langChainStream) {\n",
" console.log(step);\n",
"}"
]
},
{
"cell_type": "markdown",
"id": "cd371818",
"metadata": {},
"source": [
"#### In LangGraph\n",
"\n",
"In LangGraph, things are handled natively using the stream method.\n"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "2be89a30",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" agent: {\n",
" messages: [\n",
" AIMessage {\n",
" \"id\": \"chatcmpl-A7ezu8hirCENjdjR2GpLjkzXFTEmp\",\n",
" \"content\": \"\",\n",
" \"additional_kwargs\": {\n",
" \"tool_calls\": [\n",
" {\n",
" \"id\": \"call_KhhNL0m3mlPoJiboFMoX8hzk\",\n",
" \"type\": \"function\",\n",
" \"function\": \"[Object]\"\n",
" }\n",
" ]\n",
" },\n",
" \"response_metadata\": {\n",
" \"tokenUsage\": {\n",
" \"completionTokens\": 14,\n",
" \"promptTokens\": 55,\n",
" \"totalTokens\": 69\n",
" },\n",
" \"finish_reason\": \"tool_calls\",\n",
" \"system_fingerprint\": \"fp_483d39d857\"\n",
" },\n",
" \"tool_calls\": [\n",
" {\n",
" \"name\": \"magic_function\",\n",
" \"args\": {\n",
" \"input\": 3\n",
" },\n",
" \"type\": \"tool_call\",\n",
" \"id\": \"call_KhhNL0m3mlPoJiboFMoX8hzk\"\n",
" }\n",
" ],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
" \"input_tokens\": 55,\n",
" \"output_tokens\": 14,\n",
" \"total_tokens\": 69\n",
" }\n",
" }\n",
" ]\n",
" }\n",
"}\n",
"{\n",
" tools: {\n",
" messages: [\n",
" ToolMessage {\n",
" \"content\": \"5\",\n",
" \"name\": \"magic_function\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {},\n",
" \"tool_call_id\": \"call_KhhNL0m3mlPoJiboFMoX8hzk\"\n",
" }\n",
" ]\n",
" }\n",
"}\n",
"{\n",
" agent: {\n",
" messages: [\n",
" AIMessage {\n",
" \"id\": \"chatcmpl-A7ezuTrh8GC550eKa1ZqRZGjpY5zh\",\n",
" \"content\": \"The value of `magic_function(3)` is 5.\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {\n",
" \"tokenUsage\": {\n",
" \"completionTokens\": 14,\n",
" \"promptTokens\": 78,\n",
" \"totalTokens\": 92\n",
" },\n",
" \"finish_reason\": \"stop\",\n",
" \"system_fingerprint\": \"fp_483d39d857\"\n",
" },\n",
" \"tool_calls\": [],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
" \"input_tokens\": 78,\n",
" \"output_tokens\": 14,\n",
" \"total_tokens\": 92\n",
" }\n",
" }\n",
" ]\n",
" }\n",
"}\n"
]
}
],
"source": [
"const langGraphStream = await app.stream(\n",
" { messages: [{ role: \"user\", content: query }] },\n",
" { streamMode: \"updates\" },\n",
");\n",
"\n",
"for await (const step of langGraphStream) {\n",
" console.log(step);\n",
"}"
]
},
{
"cell_type": "markdown",
"id": "ce023792",
"metadata": {},
"source": [
"## `returnIntermediateSteps`\n",
"\n",
"Setting this parameter on AgentExecutor allows users to access\n",
"intermediate_steps, which pairs agent actions (e.g., tool invocations) with\n",
"their outcomes."
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "77ce2771",
"metadata": {
"lines_to_next_cell": 2
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[\n",
" {\n",
" action: {\n",
" tool: \"magic_function\",\n",
" toolInput: { input: 3 },\n",
" toolCallId: \"call_mbg1xgLEYEEWClbEaDe7p5tK\",\n",
" log: 'Invoking \"magic_function\" with {\"input\":3}\\n',\n",
| |
145297
|
" messageLog: [\n",
" AIMessageChunk {\n",
" \"id\": \"chatcmpl-A7f0NdSRSUJsBP6ENTpiQD4LzpBAH\",\n",
" \"content\": \"\",\n",
" \"additional_kwargs\": {\n",
" \"tool_calls\": [\n",
" {\n",
" \"index\": 0,\n",
" \"id\": \"call_mbg1xgLEYEEWClbEaDe7p5tK\",\n",
" \"type\": \"function\",\n",
" \"function\": \"[Object]\"\n",
" }\n",
" ]\n",
" },\n",
" \"response_metadata\": {\n",
" \"prompt\": 0,\n",
" \"completion\": 0,\n",
" \"finish_reason\": \"tool_calls\",\n",
" \"system_fingerprint\": \"fp_54e2f484be\"\n",
" },\n",
" \"tool_calls\": [\n",
" {\n",
" \"name\": \"magic_function\",\n",
" \"args\": {\n",
" \"input\": 3\n",
" },\n",
" \"id\": \"call_mbg1xgLEYEEWClbEaDe7p5tK\",\n",
" \"type\": \"tool_call\"\n",
" }\n",
" ],\n",
" \"tool_call_chunks\": [\n",
" {\n",
" \"name\": \"magic_function\",\n",
" \"args\": \"{\\\"input\\\":3}\",\n",
" \"id\": \"call_mbg1xgLEYEEWClbEaDe7p5tK\",\n",
" \"index\": 0,\n",
" \"type\": \"tool_call_chunk\"\n",
" }\n",
" ],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
" \"input_tokens\": 61,\n",
" \"output_tokens\": 14,\n",
" \"total_tokens\": 75\n",
" }\n",
" }\n",
" ]\n",
" },\n",
" observation: \"5\"\n",
" }\n",
"]\n"
]
}
],
"source": [
"const agentExecutorWithIntermediateSteps = new AgentExecutor({\n",
" agent,\n",
" tools,\n",
" returnIntermediateSteps: true,\n",
"});\n",
"\n",
"const result = await agentExecutorWithIntermediateSteps.invoke({\n",
" input: query,\n",
"});\n",
"\n",
"console.log(result.intermediateSteps);\n"
]
},
{
"cell_type": "markdown",
"id": "050845ae",
"metadata": {},
"source": [
"By default the\n",
"[react agent executor](https://langchain-ai.github.io/langgraphjs/reference/functions/prebuilt.createReactAgent.html)\n",
"in LangGraph appends all messages to the central state. Therefore, it is easy to\n",
"see any intermediate steps by just looking at the full state.\n"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "2f9cdfa8",
"metadata": {
"lines_to_next_cell": 2
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[\n",
" HumanMessage {\n",
" \"id\": \"46a825b2-13a3-4f19-b1aa-7716c53eb247\",\n",
" \"content\": \"what is the value of magic_function(3)?\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {}\n",
" },\n",
" AIMessage {\n",
" \"id\": \"chatcmpl-A7f0iUuWktC8gXztWZCjofqyCozY2\",\n",
" \"content\": \"\",\n",
" \"additional_kwargs\": {\n",
" \"tool_calls\": [\n",
" {\n",
" \"id\": \"call_ndsPDU58wsMeGaqr41cSlLlF\",\n",
" \"type\": \"function\",\n",
" \"function\": \"[Object]\"\n",
" }\n",
" ]\n",
" },\n",
" \"response_metadata\": {\n",
" \"tokenUsage\": {\n",
" \"completionTokens\": 14,\n",
" \"promptTokens\": 55,\n",
" \"totalTokens\": 69\n",
" },\n",
" \"finish_reason\": \"tool_calls\",\n",
" \"system_fingerprint\": \"fp_483d39d857\"\n",
" },\n",
" \"tool_calls\": [\n",
" {\n",
" \"name\": \"magic_function\",\n",
" \"args\": {\n",
" \"input\": 3\n",
" },\n",
" \"type\": \"tool_call\",\n",
" \"id\": \"call_ndsPDU58wsMeGaqr41cSlLlF\"\n",
" }\n",
" ],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
" \"input_tokens\": 55,\n",
" \"output_tokens\": 14,\n",
" \"total_tokens\": 69\n",
" }\n",
" },\n",
" ToolMessage {\n",
" \"id\": \"ac6aa309-bbfb-46cd-ba27-cbdbfd848705\",\n",
" \"content\": \"5\",\n",
" \"name\": \"magic_function\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {},\n",
" \"tool_call_id\": \"call_ndsPDU58wsMeGaqr41cSlLlF\"\n",
" },\n",
" AIMessage {\n",
" \"id\": \"chatcmpl-A7f0i7iHyDUV6is6sgwtcXivmFZ1x\",\n",
" \"content\": \"The value of `magic_function(3)` is 5.\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {\n",
" \"tokenUsage\": {\n",
" \"completionTokens\": 14,\n",
" \"promptTokens\": 78,\n",
" \"totalTokens\": 92\n",
" },\n",
" \"finish_reason\": \"stop\",\n",
" \"system_fingerprint\": \"fp_54e2f484be\"\n",
" },\n",
" \"tool_calls\": [],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
" \"input_tokens\": 78,\n",
" \"output_tokens\": 14,\n",
" \"total_tokens\": 92\n",
" }\n",
" }\n",
"]\n"
]
}
],
"source": [
"agentOutput = await app.invoke({\n",
" messages: [\n",
" { role: \"user\", content: query },\n",
" ]\n",
"});\n",
"\n",
"console.log(agentOutput.messages);"
]
},
{
"cell_type": "markdown",
"id": "f6e671e6",
"metadata": {},
"source": [
"## `maxIterations`\n",
"\n",
"`AgentExecutor` implements a `maxIterations` parameter, whereas this is\n",
"controlled via `recursionLimit` in LangGraph.\n",
"\n",
"Note that in the LangChain `AgentExecutor`, an \"iteration\" includes a full turn of tool\n",
"invocation and execution. In LangGraph, each step contributes to the recursion\n",
"limit, so we will need to multiply by two (and add one) to get equivalent\n",
"results.\n",
"\n",
"Here's an example of how you'd set this parameter with the legacy `AgentExecutor`:"
]
},
{
| |
145302
|
"data": {
"text/plain": [
"{ input: \u001b[32m\"hi!\"\u001b[39m, output: \u001b[32m\"Hello! How can I assist you today?\"\u001b[39m }"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await agentExecutor.invoke({ input: \"hi!\" })"
]
},
{
"cell_type": "markdown",
"id": "71493a42",
"metadata": {},
"source": [
"In order to see exactly what is happening under the hood (and to make sure it's not calling a tool) we can take a look at the [LangSmith trace](https://smith.langchain.com/public/b8051e80-14fd-4931-be0f-6416280bc500/r)\n",
"\n",
"Let's now try it out on an example where it should be invoking the retriever"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "3fa4780a",
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/plain": [
"{\n",
" input: \u001b[32m\"how can langsmith help with testing?\"\u001b[39m,\n",
" output: \u001b[32m\"LangSmith can assist with testing in several ways, particularly for applications built using large l\"\u001b[39m... 1474 more characters\n",
"}"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await agentExecutor.invoke({ input: \"how can langsmith help with testing?\" })"
]
},
{
"cell_type": "markdown",
"id": "f2d94242",
"metadata": {},
"source": [
"Let's take a look at the [LangSmith trace](https://smith.langchain.com/public/35bd4f0f-aa2f-4ac2-b9a9-89ce0ca306ca/r) to make sure it's actually calling that.\n",
"\n",
"Now let's try one where it needs to call the search tool:"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "77c2f769",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{\n",
" input: \u001b[32m\"whats the weather in sf?\"\u001b[39m,\n",
" output: \u001b[32m\"The current weather in San Francisco is as follows:\\n\"\u001b[39m +\n",
" \u001b[32m\"\\n\"\u001b[39m +\n",
" \u001b[32m\"- **Temperature**: 15.6°C (60.1°F)\\n\"\u001b[39m +\n",
" \u001b[32m\"- **Conditio\"\u001b[39m... 303 more characters\n",
"}"
]
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await agentExecutor.invoke({ input: \"whats the weather in sf?\" })"
]
},
{
"cell_type": "markdown",
"id": "c174f838",
"metadata": {},
"source": [
"We can check out the [LangSmith trace](https://smith.langchain.com/public/dfde6f46-0e7b-4dfe-813c-87d7bfb2ade5/r) to make sure it's calling the search tool effectively."
]
},
{
"cell_type": "markdown",
"id": "022cbc8a",
"metadata": {},
"source": [
"## Adding in memory\n",
"\n",
"As mentioned earlier, this agent is stateless. This means it does not remember previous interactions. To give it memory we need to pass in previous `chat_history`.\n",
"\n",
"**Note**: The input variable needs to be called `chat_history` because of the prompt we are using. If we use a different prompt, we could change the variable name."
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "c4073e35",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{\n",
" input: \u001b[32m\"hi! my name is bob\"\u001b[39m,\n",
" chat_history: [],\n",
" output: \u001b[32m\"Hello Bob! How can I assist you today?\"\u001b[39m\n",
"}"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"// Here we pass in an empty list of messages for chat_history because it is the first message in the chat\n",
"await agentExecutor.invoke({ input: \"hi! my name is bob\", chat_history: [] })"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "550e0c6e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{\n",
" chat_history: [\n",
" { role: \u001b[32m\"user\"\u001b[39m, content: \u001b[32m\"hi! my name is bob\"\u001b[39m },\n",
" {\n",
" role: \u001b[32m\"assistant\"\u001b[39m,\n",
" content: \u001b[32m\"Hello Bob! How can I assist you today?\"\u001b[39m\n",
" }\n",
" ],\n",
" input: \u001b[32m\"what's my name?\"\u001b[39m,\n",
" output: \u001b[32m\"Your name is Bob. How can I help you today, Bob?\"\u001b[39m\n",
"}"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await agentExecutor.invoke(\n",
" {\n",
" chat_history: [\n",
" { role: \"user\", content: \"hi! my name is bob\" },\n",
" { role: \"assistant\", content: \"Hello Bob! How can I assist you today?\" },\n",
" ],\n",
" input: \"what's my name?\",\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "07b3bcf2",
"metadata": {},
"source": [
"If we want to keep track of these messages automatically, we can wrap this in a RunnableWithMessageHistory.\n",
"\n",
"Because we have multiple inputs, we need to specify two things:\n",
"\n",
"- `inputMessagesKey`: The input key to use to add to the conversation history.\n",
"- `historyMessagesKey`: The key to add the loaded messages into.\n",
"\n",
"For more information on how to use this, see [this guide](/docs/how_to/message_history). "
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "8edd96e6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{\n",
" input: \u001b[32m\"hi! I'm bob\"\u001b[39m,\n",
" chat_history: [\n",
" HumanMessage {\n",
" \"content\": \"hi! I'm bob\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {}\n",
" },\n",
" AIMessage {\n",
" \"content\": \"Hello Bob! How can I assist you today?\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {},\n",
| |
145306
|
{
"cells": [
{
"cell_type": "markdown",
"id": "ea37db49-d389-4291-be73-885d06c1fb7e",
"metadata": {},
"source": [
"# How to do extraction without using function calling\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following:\n",
"\n",
"- [Extraction](/docs/tutorials/extraction)\n",
"\n",
":::\n",
"\n",
"LLMs that are able to follow prompt instructions well can be tasked with outputting information in a given format without using function calling.\n",
"\n",
"This approach relies on designing good prompts and then parsing the output of the LLMs to make them extract information well, though it lacks some of the guarantees provided by function calling or JSON mode.\n",
"\n",
"Here, we'll use Claude which is great at following instructions! See [here for more about Anthropic models](/docs/integrations/chat/anthropic).\n",
"\n",
"First, we'll install the integration package:\n",
"\n",
"```{=mdx}\n",
"import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n",
"import Npm2Yarn from \"@theme/Npm2Yarn\";\n",
"\n",
"<IntegrationInstallTooltip></IntegrationInstallTooltip>\n",
"\n",
"<Npm2Yarn>\n",
" @langchain/anthropic @langchain/core zod zod-to-json-schema\n",
"</Npm2Yarn>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "d71b32de-a6b4-45ed-83a9-ba1925f9470c",
"metadata": {},
"outputs": [],
"source": [
"import { ChatAnthropic } from \"@langchain/anthropic\";\n",
"\n",
"const model = new ChatAnthropic({\n",
" model: \"claude-3-sonnet-20240229\",\n",
" temperature: 0,\n",
"})"
]
},
{
"cell_type": "markdown",
"id": "3e412374-3beb-4bbf-966b-400c1f66a258",
"metadata": {},
"source": [
":::{.callout-tip}\n",
"All the same considerations for extraction quality apply for parsing approach.\n",
"\n",
"This tutorial is meant to be simple, but generally should really include reference examples to squeeze out performance!\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "abc1a945-0f80-4953-add4-cd572b6f2a51",
"metadata": {},
"source": [
"## Using StructuredOutputParser\n",
"\n",
"The following example uses the built-in [`StructuredOutputParser`](/docs/how_to/output_parser_structured/) to parse the output of a chat model. We use the built-in prompt formatting instructions contained in the parser."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "497eb023-c043-443d-ac62-2d4ea85fe1b0",
"metadata": {},
"outputs": [],
"source": [
"import { z } from \"zod\";\n",
"import { StructuredOutputParser } from \"langchain/output_parsers\";\n",
"import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
"\n",
"let personSchema = z.object({\n",
" name: z.optional(z.string()).describe(\"The name of the person\"),\n",
" hair_color: z.optional(z.string()).describe(\"The color of the person's hair, if known\"),\n",
" height_in_meters: z.optional(z.string()).describe(\"Height measured in meters\")\n",
"}).describe(\"Information about a person.\");\n",
"\n",
"const parser = StructuredOutputParser.fromZodSchema(personSchema);\n",
"\n",
"const prompt = ChatPromptTemplate.fromMessages([\n",
" [\"system\", \"Answer the user query. Wrap the output in `json` tags\\n{format_instructions}\"],\n",
" [\"human\", \"{query}\"],\n",
"]);\n",
"\n",
"const partialedPrompt = await prompt.partial({\n",
" format_instructions: parser.getFormatInstructions(),\n",
"});"
]
},
{
"cell_type": "markdown",
"id": "c31aa2c8-05a9-4a12-80c5-ea1250dea0ae",
"metadata": {},
"source": [
"Let's take a look at what information is sent to the model"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "20b99ffb-a114-49a9-a7be-154c525f8ada",
"metadata": {},
"outputs": [],
"source": [
"const query = \"Anna is 23 years old and she is 6 feet tall\";"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "4f3a66ce-de19-4571-9e54-67504ae3fba7",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[\n",
" SystemMessage {\n",
" lc_serializable: true,\n",
" lc_kwargs: {\n",
" content: \"Answer the user query. Wrap the output in `json` tags\\n\" +\n",
" \"You must format your output as a JSON value th\"... 1444 more characters,\n",
" additional_kwargs: {}\n",
" },\n",
" lc_namespace: [ \"langchain_core\", \"messages\" ],\n",
" content: \"Answer the user query. Wrap the output in `json` tags\\n\" +\n",
" \"You must format your output as a JSON value th\"... 1444 more characters,\n",
" name: undefined,\n",
" additional_kwargs: {}\n",
" },\n",
" HumanMessage {\n",
" lc_serializable: true,\n",
" lc_kwargs: {\n",
" content: \"Anna is 23 years old and she is 6 feet tall\",\n",
" additional_kwargs: {}\n",
" },\n",
" lc_namespace: [ \"langchain_core\", \"messages\" ],\n",
" content: \"Anna is 23 years old and she is 6 feet tall\",\n",
" name: undefined,\n",
" additional_kwargs: {}\n",
" }\n",
"]\n"
]
}
],
"source": [
"const promptValue = await partialedPrompt.invoke({ query });\n",
"\n",
"console.log(promptValue.toChatMessages());"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "3a46b5fd-9242-4b8c-a4e2-3f04fc19b3a4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{ name: \u001b[32m\"Anna\"\u001b[39m, hair_color: \u001b[32m\"\"\u001b[39m, height_in_meters: \u001b[32m\"1.83\"\u001b[39m }"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"const chain = partialedPrompt.pipe(model).pipe(parser);\n",
"\n",
"await chain.invoke({ query });"
]
},
{
"cell_type": "markdown",
"id": "815b3b87-3bc6-4b56-835e-c6b6703cef5d",
"metadata": {},
"source": [
"## Custom Parsing\n",
"\n",
"You can also create a custom prompt and parser with `LangChain` and `LCEL`.\n",
"\n",
"You can use a raw function to parse the output from the model.\n",
"\n",
| |
145308
|
{
"cells": [
{
"cell_type": "markdown",
"id": "c3ee8d00",
"metadata": {},
"source": [
"# How to split by character\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Text splitters](/docs/concepts#text-splitters)\n",
"\n",
":::\n",
"\n",
"This is the simplest method for splitting text. This splits based on a given character sequence, which defaults to `\"\\n\\n\"`. Chunk length is measured by number of characters.\n",
"\n",
"1. How the text is split: by single character separator.\n",
"2. How the chunk size is measured: by number of characters.\n",
"\n",
"To obtain the string content directly, use `.splitText()`.\n",
"\n",
"To create LangChain [Document](https://api.js.langchain.com/classes/langchain_core.documents.Document.html) objects (e.g., for use in downstream tasks), use `.createDocuments()`."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "313fb032",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Document {\n",
" pageContent: \"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and th\"... 839 more characters,\n",
" metadata: { loc: { lines: { from: 1, to: 17 } } }\n",
"}\n"
]
}
],
"source": [
"import { CharacterTextSplitter } from \"@langchain/textsplitters\";\n",
"import * as fs from \"node:fs\";\n",
"\n",
"// Load an example document\n",
"const rawData = await fs.readFileSync(\"../../../../examples/state_of_the_union.txt\");\n",
"const stateOfTheUnion = rawData.toString();\n",
"\n",
"const textSplitter = new CharacterTextSplitter({\n",
" separator: \"\\n\\n\",\n",
" chunkSize: 1000,\n",
" chunkOverlap: 200,\n",
"});\n",
"const texts = await textSplitter.createDocuments([stateOfTheUnion]);\n",
"console.log(texts[0])"
]
},
{
"cell_type": "markdown",
"id": "dadcb9d6",
"metadata": {},
"source": [
"You can also propagate metadata associated with each document to the output chunks:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1affda60",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Document {\n",
" pageContent: \"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and th\"... 839 more characters,\n",
" metadata: { document: 1, loc: { lines: { from: 1, to: 17 } } }\n",
"}\n"
]
}
],
"source": [
"const metadatas = [{ document: 1 }, { document: 2 }];\n",
"\n",
"const documents = await textSplitter.createDocuments(\n",
" [stateOfTheUnion, stateOfTheUnion], metadatas\n",
")\n",
"\n",
"console.log(documents[0])"
]
},
{
"cell_type": "markdown",
"id": "ee080e12-6f44-4311-b1ef-302520a41d66",
"metadata": {},
"source": [
"To obtain the string content directly, use `.splitText()`:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "2a830a9f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\u001b[32m\"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and th\"\u001b[39m... 839 more characters"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"const chunks = await textSplitter.splitText(stateOfTheUnion);\n",
"\n",
"chunks[0];"
]
},
{
"cell_type": "markdown",
"id": "cd4dd67a",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You've now learned a method for splitting text by character.\n",
"\n",
"Next, check out a [more advanced way of splitting by character](/docs/how_to/recursive_text_splitter), or the [full tutorial on retrieval-augmented generation](/docs/tutorials/rag)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Deno",
"language": "typescript",
"name": "deno"
},
"language_info": {
"file_extension": ".ts",
"mimetype": "text/x.typescript",
"name": "typescript",
"nb_converter": "script",
"pygments_lexer": "typescript",
"version": "5.3.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| |
145312
|
```bash
[chain/start] [1:chain:AgentExecutor] Entering Chain run with input: {
"input": "Who directed the 2023 film Oppenheimer and what is their age? What is their age in days (assume 365 days per year)?"
}
[chain/start] [1:chain:AgentExecutor > 2:chain:ToolCallingAgent] Entering Chain run with input: {
"input": "Who directed the 2023 film Oppenheimer and what is their age? What is their age in days (assume 365 days per year)?",
"steps": []
}
[chain/start] [1:chain:AgentExecutor > 2:chain:ToolCallingAgent > 3:chain:RunnableAssign] Entering Chain run with input: {
"input": ""
}
[chain/start] [1:chain:AgentExecutor > 2:chain:ToolCallingAgent > 3:chain:RunnableAssign > 4:chain:RunnableMap] Entering Chain run with input: {
"input": ""
}
[chain/start] [1:chain:AgentExecutor > 2:chain:ToolCallingAgent > 3:chain:RunnableAssign > 4:chain:RunnableMap > 5:chain:RunnableLambda] Entering Chain run with input: {
"input": ""
}
[chain/end] [1:chain:AgentExecutor > 2:chain:ToolCallingAgent > 3:chain:RunnableAssign > 4:chain:RunnableMap > 5:chain:RunnableLambda] [0ms] Exiting Chain run with output: {
"output": []
}
[chain/end] [1:chain:AgentExecutor > 2:chain:ToolCallingAgent > 3:chain:RunnableAssign > 4:chain:RunnableMap] [1ms] Exiting Chain run with output: {
"agent_scratchpad": []
}
[chain/end] [1:chain:AgentExecutor > 2:chain:ToolCallingAgent > 3:chain:RunnableAssign] [1ms] Exiting Chain run with output: {
"input": "Who directed the 2023 film Oppenheimer and what is their age? What is their age in days (assume 365 days per year)?",
"steps": [],
"agent_scratchpad": []
}
[chain/start] [1:chain:AgentExecutor > 2:chain:ToolCallingAgent > 6:prompt:ChatPromptTemplate] Entering Chain run with input: {
"input": "Who directed the 2023 film Oppenheimer and what is their age? What is their age in days (assume 365 days per year)?",
"steps": [],
"agent_scratchpad": []
}
[chain/end] [1:chain:AgentExecutor > 2:chain:ToolCallingAgent > 6:prompt:ChatPromptTemplate] [0ms] Exiting Chain run with output: {
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"prompt_values",
"ChatPromptValue"
],
"kwargs": {
"messages": [
{
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"messages",
"SystemMessage"
],
"kwargs": {
"content": "You are a helpful assistant",
"additional_kwargs": {},
"response_metadata": {}
}
},
{
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"messages",
"HumanMessage"
],
"kwargs": {
"content": "Who directed the 2023 film Oppenheimer and what is their age? What is their age in days (assume 365 days per year)?",
"additional_kwargs": {},
"response_metadata": {}
}
}
]
}
}
[llm/start] [1:chain:AgentExecutor > 2:chain:ToolCallingAgent > 7:llm:ChatAnthropic] Entering LLM run with input: {
"messages": [
[
{
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"messages",
"SystemMessage"
],
"kwargs": {
"content": "You are a helpful assistant",
"additional_kwargs": {},
"response_metadata": {}
}
},
{
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"messages",
"HumanMessage"
],
"kwargs": {
"content": "Who directed the 2023 film Oppenheimer and what is their age? What is their age in days (assume 365 days per year)?",
"additional_kwargs": {},
"response_metadata": {}
}
}
]
]
}
[llm/start] [1:llm:ChatAnthropic] Entering LLM run with input: {
"messages": [
[
{
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"messages",
"SystemMessage"
],
"kwargs": {
"content": "You are a helpful assistant",
"additional_kwargs": {},
"response_metadata": {}
}
},
{
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"messages",
"HumanMessage"
],
"kwargs": {
"content": "Who directed the 2023 film Oppenheimer and what is their age? What is their age in days (assume 365 days per year)?",
"additional_kwargs": {},
"response_metadata": {}
}
}
]
]
}
[llm/end] [1:chain:AgentExecutor > 2:chain:ToolCallingAgent > 7:llm:ChatAnthropic] [1.98s] Exiting LLM run with output: {
"generations": [
[
{
"text": "",
"message": {
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"messages",
"AIMessageChunk"
],
"kwargs": {
"content": [
{
"type": "tool_use",
"id": "toolu_01NUVejujVo2y8WGVtZ49KAN",
"name": "tavily_search_results_json",
"input": {
"input": "Oppenheimer 2023 film director age"
}
}
],
"additional_kwargs": {
"id": "msg_015MqAHr84dBCAqBgjou41Km",
"type": "message",
"role": "assistant",
"model": "claude-3-sonnet-20240229",
"stop_sequence": null,
"usage": {
"input_tokens": 409,
"output_tokens": 68
},
"stop_reason": "tool_use"
},
"tool_call_chunks": [
{
"name": "tavily_search_results_json",
"args": "{\"input\":\"Oppenheimer 2023 film director age\"}",
"id": "toolu_01NUVejujVo2y8WGVtZ49KAN",
"index": 0
}
],
"tool_calls": [
{
"name": "tavily_search_results_json",
"args": {
"input": "Oppenheimer 2023 film director age"
},
"id": "toolu_01NUVejujVo2y8WGVtZ49KAN"
}
],
"invalid_tool_calls": [],
"response_metadata": {}
}
}
}
]
]
}
[llm/end] [1:llm:ChatAnthropic] [1.98s] Exiting LLM run with output: {
"generations": [
[
{
"text": "",
"message": {
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"messages",
"AIMessageChunk"
],
"kwargs": {
"content": [
{
"type": "tool_use",
"id": "toolu_01NUVejujVo2y8WGVtZ49KAN",
"name": "tavily_search_results_json",
"input": {
"input": "Oppenheimer 2023 film director age"
}
}
],
"additional_kwargs": {
"id": "msg_015MqAHr84dBCAqBgjou41Km",
"type": "message",
"role": "assistant",
"model": "claude-3-sonnet-20240229",
"stop_sequence": null,
"usage": {
"input_tokens": 409,
"output_tokens": 68
},
"stop_reason": "tool_use"
},
"tool_call_chunks": [
{
"name": "tavily_search_results_json",
"args": "{\"input\":\"Oppenheimer 2023 film director age\"}",
| |
145390
|
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import { ConsoleCallbackHandler } from \"@langchain/core/tracers/console\";\n",
"import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
"import { ChatAnthropic } from \"@langchain/anthropic\";\n",
"\n",
"const handler = new ConsoleCallbackHandler();\n",
"\n",
"const prompt = ChatPromptTemplate.fromTemplate(`What is 1 + {number}?`);\n",
"const model = new ChatAnthropic({\n",
" model: \"claude-3-sonnet-20240229\",\n",
" callbacks: [handler],\n",
"});\n",
"\n",
"const chain = prompt.pipe(model);\n",
"\n",
"await chain.invoke({ number: \"2\" });"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can see that we only see events from the chat model run - none from the prompt or broader chain.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to pass callbacks into a constructor.\n",
"\n",
"Next, check out the other how-to guides in this section, such as how to create your own [custom callback handlers](/docs/how_to/custom_callbacks)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Deno",
"language": "typescript",
"name": "deno"
},
"language_info": {
"file_extension": ".ts",
"mimetype": "text/x.typescript",
"name": "typescript",
"nb_converter": "script",
"pygments_lexer": "typescript",
"version": "5.3.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| |
145397
|
---
sidebar_position: 2
---
# How to embed text data
:::info
Head to [Integrations](/docs/integrations/text_embedding) for documentation on built-in integrations with text embedding providers.
:::
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [Embeddings](/docs/concepts/#embedding-models)
:::
Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.
The base Embeddings class in LangChain exposes two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).
## Get started
Below is an example of how to use the OpenAI embeddings. Embeddings occasionally have different embedding methods for queries versus documents, so the embedding class exposes separate `embedQuery` and `embedDocuments` methods.
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
<IntegrationInstallTooltip></IntegrationInstallTooltip>
```bash npm2yarn
npm install @langchain/openai @langchain/core
```
```typescript
import { OpenAIEmbeddings } from "@langchain/openai";
const embeddings = new OpenAIEmbeddings();
```
## Embed queries
```typescript
const res = await embeddings.embedQuery("Hello world");
/*
[
-0.004845875, 0.004899438, -0.016358767, -0.024475135, -0.017341806,
0.012571548, -0.019156644, 0.009036391, -0.010227379, -0.026945334,
0.022861943, 0.010321903, -0.023479493, -0.0066544134, 0.007977734,
0.0026371893, 0.025206111, -0.012048521, 0.012943339, 0.013094575,
-0.010580265, -0.003509951, 0.004070787, 0.008639394, -0.020631202,
... 1511 more items
]
*/
```
## Embed documents
```typescript
const documentRes = await embeddings.embedDocuments(["Hello world", "Bye bye"]);
/*
[
[
-0.004845875, 0.004899438, -0.016358767, -0.024475135, -0.017341806,
0.012571548, -0.019156644, 0.009036391, -0.010227379, -0.026945334,
0.022861943, 0.010321903, -0.023479493, -0.0066544134, 0.007977734,
0.0026371893, 0.025206111, -0.012048521, 0.012943339, 0.013094575,
-0.010580265, -0.003509951, 0.004070787, 0.008639394, -0.020631202,
... 1511 more items
]
[
-0.009446913, -0.013253193, 0.013174579, 0.0057552797, -0.038993083,
0.0077763423, -0.0260478, -0.0114384955, -0.0022683728, -0.016509168,
0.041797023, 0.01787183, 0.00552271, -0.0049789557, 0.018146982,
-0.01542166, 0.033752076, 0.006112323, 0.023872782, -0.016535373,
-0.006623321, 0.016116094, -0.0061090477, -0.0044155475, -0.016627092,
... 1511 more items
]
]
*/
```
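Once you have embeddings for a query and for your documents, you can compare them directly. Below is a brief sketch that reuses the `res` and `documentRes` values from the snippets above and scores each document against the query with cosine similarity (the exact scores shown are illustrative):
```typescript
import { cosineSimilarity } from "@langchain/core/utils/math";

// cosineSimilarity takes two matrices (arrays of vectors) and returns a
// matrix of pairwise similarity scores.
const scores = cosineSimilarity([res], documentRes)[0];

console.log(scores);
// e.g. [ 0.92, 0.31 ] - "Hello world" is closer to the query than "Bye bye"
```
Higher scores mean the document sits closer to the query in the vector space, which is the basic mechanism behind semantic search.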
## Next steps
You've now learned how to use embeddings models with queries and text.
Next, check out how to [avoid excessively recomputing embeddings with caching](/docs/how_to/caching_embeddings), or the [full tutorial on retrieval-augmented generation](/docs/tutorials/rag).
| |
145402
|
{
"cells": [
{
"cell_type": "raw",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 1\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to add memory to chatbots\n",
"\n",
"A key feature of chatbots is their ability to use content of previous conversation turns as context. This state management can take several forms, including:\n",
"\n",
"- Simply stuffing previous messages into a chat model prompt.\n",
"- The above, but trimming old messages to reduce the amount of distracting information the model has to deal with.\n",
"- More complex modifications like synthesizing summaries for long running conversations.\n",
"\n",
"We'll go into more detail on a few techniques below!\n",
"\n",
":::note\n",
"\n",
"This how-to guide previously built a chatbot using [RunnableWithMessageHistory](https://v03.api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html). You can access this version of the tutorial in the [v0.2 docs](https://js.langchain.com/v0.2/docs/how_to/chatbots_memory/).\n",
"\n",
"The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n",
"\n",
":::\n",
"\n",
"## Setup\n",
"\n",
"You'll need to install a few packages, select your chat model, and set its enviroment variable.\n",
"\n",
"```{=mdx}\n",
"import Npm2Yarn from \"@theme/Npm2Yarn\"\n",
"\n",
"<Npm2Yarn>\n",
" @langchain/core @langchain/langgraph\n",
"</Npm2Yarn>\n",
"```\n",
"\n",
"Let's set up a chat model that we'll use for the below examples.\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs />\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Message passing\n",
"\n",
"The simplest form of memory is simply passing chat history messages into a chain. Here's an example:"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [],
"source": [
"// @lc-docs-hide-cell\n",
"\n",
"import { ChatOpenAI } from \"@langchain/openai\";\n",
"\n",
"const llm = new ChatOpenAI({ model: \"gpt-4o\" })"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"AIMessage {\n",
" \"id\": \"chatcmpl-ABSxUXVIBitFRBh9MpasB5jeEHfCA\",\n",
" \"content\": \"I said \\\"J'adore la programmation,\\\" which means \\\"I love programming\\\" in French.\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {\n",
" \"tokenUsage\": {\n",
" \"completionTokens\": 18,\n",
" \"promptTokens\": 58,\n",
" \"totalTokens\": 76\n",
" },\n",
" \"finish_reason\": \"stop\",\n",
" \"system_fingerprint\": \"fp_e375328146\"\n",
" },\n",
" \"tool_calls\": [],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
" \"input_tokens\": 58,\n",
" \"output_tokens\": 18,\n",
" \"total_tokens\": 76\n",
" }\n",
"}\n"
]
}
],
"source": [
"import { HumanMessage, AIMessage } from \"@langchain/core/messages\";\n",
"import {\n",
" ChatPromptTemplate,\n",
" MessagesPlaceholder,\n",
"} from \"@langchain/core/prompts\";\n",
"\n",
"const prompt = ChatPromptTemplate.fromMessages([\n",
" [\n",
" \"system\",\n",
" \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
" ],\n",
" new MessagesPlaceholder(\"messages\"),\n",
"]);\n",
"\n",
"const chain = prompt.pipe(llm);\n",
"\n",
"await chain.invoke({\n",
" messages: [\n",
" new HumanMessage(\n",
" \"Translate this sentence from English to French: I love programming.\"\n",
" ),\n",
" new AIMessage(\"J'adore la programmation.\"),\n",
" new HumanMessage(\"What did you just say?\"),\n",
" ],\n",
"});"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can see that by passing the previous conversation into a chain, it can use it as context to answer questions. This is the basic concept underpinning chatbot memory - the rest of the guide will demonstrate convenient techniques for passing or reformatting messages."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Automatic history management\n",
"\n",
"The previous examples pass messages to the chain (and model) explicitly. This is a completely acceptable approach, but it does require external management of new messages. LangChain also provides a way to build applications that have memory using LangGraph's persistence. You can enable persistence in LangGraph applications by providing a `checkpointer` when compiling the graph."
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [],
"source": [
"import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n",
"\n",
"\n",
"// Define the function that calls the model\n",
"const callModel = async (state: typeof MessagesAnnotation.State) => {\n",
" const systemPrompt = \n",
" \"You are a helpful assistant. \" +\n",
" \"Answer all questions to the best of your ability.\";\n",
" const messages = [{ role: \"system\", content: systemPrompt }, ...state.messages];\n",
" const response = await llm.invoke(messages);\n",
" return { messages: response };\n",
"};\n",
"\n",
"const workflow = new StateGraph(MessagesAnnotation)\n",
"// Define the node and edge\n",
" .addNode(\"model\", callModel)\n",
" .addEdge(START, \"model\")\n",
" .addEdge(\"model\", END);\n",
"\n",
"// Add simple in-memory checkpointer\n",
"// highlight-start\n",
"const memory = new MemorySaver();\n",
"const app = workflow.compile({ checkpointer: memory });\n",
"// highlight-end"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
" We'll pass the latest input to the conversation here and let the LangGraph keep track of the conversation history using the checkpointer:"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" messages: [\n",
" HumanMessage {\n",
" \"id\": \"227b82a9-4084-46a5-ac79-ab9a3faa140e\",\n",
" \"content\": \"Translate to French: I love programming.\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {}\n",
" },\n",
" AIMessage {\n",
" \"id\": \"chatcmpl-ABSxVrvztgnasTeMSFbpZQmyYqjJZ\",\n",
| |
145405
|
"We can use this same pattern in other ways too. For example, we could use an additional LLM call to generate a summary of the conversation before calling our app. Let's recreate our chat history:"
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {},
"outputs": [],
"source": [
"const demoEphemeralChatHistory2 = [\n",
" { role: \"user\", content: \"Hey there! I'm Nemo.\" },\n",
" { role: \"assistant\", content: \"Hello!\" },\n",
" { role: \"user\", content: \"How are you today?\" },\n",
" { role: \"assistant\", content: \"Fine thanks!\" },\n",
"];"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And now, let's update the model-calling function to distill previous interactions into a summary:"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [],
"source": [
"import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n",
"import { RemoveMessage } from \"@langchain/core/messages\";\n",
"\n",
"\n",
"// Define the function that calls the model\n",
"const callModel3 = async (state: typeof MessagesAnnotation.State) => {\n",
" const systemPrompt = \n",
" \"You are a helpful assistant. \" +\n",
" \"Answer all questions to the best of your ability. \" +\n",
" \"The provided chat history includes a summary of the earlier conversation.\";\n",
" const systemMessage = { role: \"system\", content: systemPrompt };\n",
" const messageHistory = state.messages.slice(0, -1); // exclude the most recent user input\n",
" \n",
" // Summarize the messages if the chat history reaches a certain size\n",
" if (messageHistory.length >= 4) {\n",
" const lastHumanMessage = state.messages[state.messages.length - 1];\n",
" // Invoke the model to generate conversation summary\n",
" const summaryPrompt = \n",
" \"Distill the above chat messages into a single summary message. \" +\n",
" \"Include as many specific details as you can.\";\n",
" const summaryMessage = await llm.invoke([\n",
" ...messageHistory,\n",
" { role: \"user\", content: summaryPrompt }\n",
" ]);\n",
"\n",
" // Delete messages that we no longer want to show up\n",
" const deleteMessages = state.messages.map(m => new RemoveMessage({ id: m.id }));\n",
" // Re-add user message\n",
" const humanMessage = { role: \"user\", content: lastHumanMessage.content };\n",
" // Call the model with summary & response\n",
" const response = await llm.invoke([systemMessage, summaryMessage, humanMessage]);\n",
" return { messages: [summaryMessage, humanMessage, response, ...deleteMessages] };\n",
" } else {\n",
" const response = await llm.invoke([systemMessage, ...state.messages]);\n",
" return { messages: response };\n",
" }\n",
"};\n",
"\n",
"const workflow3 = new StateGraph(MessagesAnnotation)\n",
" // Define the node and edge\n",
" .addNode(\"model\", callModel3)\n",
" .addEdge(START, \"model\")\n",
" .addEdge(\"model\", END);\n",
"\n",
"// Add simple in-memory checkpointer\n",
"const app3 = workflow3.compile({ checkpointer: new MemorySaver() });"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's see if it remembers the name we gave it:"
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" messages: [\n",
" AIMessage {\n",
" \"id\": \"chatcmpl-ABSxXjFDj6WRo7VLSneBtlAxUumPE\",\n",
" \"content\": \"Nemo greeted the assistant and asked how it was doing, to which the assistant responded that it was fine.\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {\n",
" \"tokenUsage\": {\n",
" \"completionTokens\": 22,\n",
" \"promptTokens\": 60,\n",
" \"totalTokens\": 82\n",
" },\n",
" \"finish_reason\": \"stop\",\n",
" \"system_fingerprint\": \"fp_e375328146\"\n",
" },\n",
" \"tool_calls\": [],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
" \"input_tokens\": 60,\n",
" \"output_tokens\": 22,\n",
" \"total_tokens\": 82\n",
" }\n",
" },\n",
" HumanMessage {\n",
" \"id\": \"8b1309b7-c09e-47fb-9ab3-34047f6973e3\",\n",
" \"content\": \"What did I say my name was?\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {}\n",
" },\n",
" AIMessage {\n",
" \"id\": \"chatcmpl-ABSxYAQKiBsQ6oVypO4CLFDsi1HRH\",\n",
" \"content\": \"You mentioned that your name is Nemo.\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {\n",
" \"tokenUsage\": {\n",
" \"completionTokens\": 8,\n",
" \"promptTokens\": 73,\n",
" \"totalTokens\": 81\n",
" },\n",
" \"finish_reason\": \"stop\",\n",
" \"system_fingerprint\": \"fp_52a7f40b0b\"\n",
" },\n",
" \"tool_calls\": [],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
" \"input_tokens\": 73,\n",
" \"output_tokens\": 8,\n",
" \"total_tokens\": 81\n",
" }\n",
" }\n",
" ]\n",
"}\n"
]
}
],
"source": [
"await app3.invoke(\n",
" {\n",
" messages: [\n",
" ...demoEphemeralChatHistory2,\n",
" { role: \"user\", content: \"What did I say my name was?\" }\n",
" ]\n",
" },\n",
" {\n",
" configurable: { thread_id: \"4\" }\n",
" }\n",
");"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that invoking the app again will keep accumulating the history until it reaches the specified number of messages (four in our case). At that point we will generate another summary generated from the initial summary plus new messages and so on."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Deno",
"language": "typescript",
"name": "deno"
},
"language_info": {
"codemirror_mode": {
"mode": "typescript",
"name": "javascript",
"typescript": true
},
"file_extension": ".ts",
"mimetype": "text/typescript",
"name": "typescript",
"version": "3.7.2"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| |
145420
|
# How to write a custom retriever class
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [Retrievers](/docs/concepts/#retrievers)
:::
To create your own retriever, you need to extend the [`BaseRetriever`](https://api.js.langchain.com/classes/langchain_core.retrievers.BaseRetriever.html) class
and implement a `_getRelevantDocuments` method that takes a `string` as its first parameter (and an optional `runManager` for tracing).
This method should return an array of `Document`s fetched from some source. This process can involve calls to a database, to the web using `fetch`, or any other source.
Note the underscore before `_getRelevantDocuments()`. The base class wraps the non-prefixed version in order to automatically handle tracing of the original call.
Here's an example of a custom retriever that returns static documents:
```ts
import {
BaseRetriever,
type BaseRetrieverInput,
} from "@langchain/core/retrievers";
import type { CallbackManagerForRetrieverRun } from "@langchain/core/callbacks/manager";
import { Document } from "@langchain/core/documents";
export interface CustomRetrieverInput extends BaseRetrieverInput {}
export class CustomRetriever extends BaseRetriever {
lc_namespace = ["langchain", "retrievers"];
constructor(fields?: CustomRetrieverInput) {
super(fields);
}
async _getRelevantDocuments(
query: string,
runManager?: CallbackManagerForRetrieverRun
): Promise<Document[]> {
// Pass `runManager?.getChild()` when invoking internal runnables to enable tracing
// const additionalDocs = await someOtherRunnable.invoke(params, runManager?.getChild());
return [
// ...additionalDocs,
new Document({
pageContent: `Some document pertaining to ${query}`,
metadata: {},
}),
new Document({
pageContent: `Some other document pertaining to ${query}`,
metadata: {},
}),
];
}
}
```
Then, you can call `.invoke()` as follows:
```ts
const retriever = new CustomRetriever({});
await retriever.invoke("LangChain docs");
```
```
[
Document {
pageContent: 'Some document pertaining to LangChain docs',
metadata: {}
},
Document {
pageContent: 'Some other document pertaining to LangChain docs',
metadata: {}
}
]
```
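Because retrievers are runnables, you can also compose your custom retriever with other runnables via `.pipe()`. Here's a small sketch (the formatting step is just an illustrative arrow function):
```ts
const formatDocs = (docs: Document[]) =>
  docs.map((doc) => doc.pageContent).join("\n\n");

// Retrieve documents, then format them into a single string
const retrievalChain = new CustomRetriever({}).pipe(formatDocs);

await retrievalChain.invoke("LangChain docs");
// "Some document pertaining to LangChain docs\n\nSome other document pertaining to LangChain docs"
```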
## Next steps
You've now seen an example of implementing your own custom retriever.
Next, check out the individual sections for deeper dives on specific retrievers, or the [broader tutorial on RAG](/docs/tutorials/rag).
| |
145421
|
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to return sources\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following:\n",
"\n",
"- [Retrieval-augmented generation](/docs/tutorials/rag/)\n",
"\n",
":::\n",
"\n",
"Often in Q&A applications it’s important to show users the sources that were used to generate the answer. The simplest way to do this is for the chain to return the Documents that were retrieved in each generation.\n",
"\n",
"We'll be using the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng for retrieval content this notebook."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"### Dependencies\n",
"\n",
"We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/#chat-models) or [LLM](/docs/concepts#llms), [Embeddings](/docs/concepts#embedding-models), and [VectorStore](/docs/concepts#vectorstores) or [Retriever](/docs/concepts#retrievers).\n",
"\n",
"We’ll use the following packages:\n",
"\n",
"```bash\n",
"npm install --save langchain @langchain/openai cheerio\n",
"```\n",
"\n",
"We need to set environment variable `OPENAI_API_KEY`:\n",
"\n",
"```bash\n",
"export OPENAI_API_KEY=YOUR_KEY\n",
"```\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### LangSmith\n",
"\n",
"Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com/).\n",
"\n",
"Note that LangSmith is not needed, but it is helpful. If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\n",
"\n",
"\n",
"```bash\n",
"export LANGCHAIN_TRACING_V2=true\n",
"export LANGCHAIN_API_KEY=YOUR_KEY\n",
"\n",
"# Reduce tracing latency if you are not in a serverless environment\n",
"# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Chain without sources\n",
"\n",
"Here is the Q&A app we built over the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng in the [Quickstart](/docs/tutorials/qa_chat_history/)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import \"cheerio\";\n",
"import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n",
"import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n",
"import { MemoryVectorStore } from \"langchain/vectorstores/memory\"\n",
"import { OpenAIEmbeddings, ChatOpenAI } from \"@langchain/openai\";\n",
"import { pull } from \"langchain/hub\";\n",
"import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
"import { formatDocumentsAsString } from \"langchain/util/document\";\n",
"import { RunnableSequence, RunnablePassthrough } from \"@langchain/core/runnables\";\n",
"import { StringOutputParser } from \"@langchain/core/output_parsers\";\n",
"\n",
"const loader = new CheerioWebBaseLoader(\n",
" \"https://lilianweng.github.io/posts/2023-06-23-agent/\"\n",
");\n",
"\n",
"const docs = await loader.load();\n",
"\n",
"const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n",
"const splits = await textSplitter.splitDocuments(docs);\n",
"const vectorStore = await MemoryVectorStore.fromDocuments(splits, new OpenAIEmbeddings());\n",
"\n",
"// Retrieve and generate using the relevant snippets of the blog.\n",
"const retriever = vectorStore.asRetriever();\n",
"const prompt = await pull<ChatPromptTemplate>(\"rlm/rag-prompt\");\n",
"const llm = new ChatOpenAI({ model: \"gpt-3.5-turbo\", temperature: 0 });\n",
"\n",
"const ragChain = RunnableSequence.from([\n",
" {\n",
" context: retriever.pipe(formatDocumentsAsString),\n",
" question: new RunnablePassthrough(),\n",
" },\n",
" prompt,\n",
" llm,\n",
" new StringOutputParser()\n",
"]);"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's see what this prompt actually looks like:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\n",
"Question: {question} \n",
"Context: {context} \n",
"Answer:\n"
]
}
],
"source": [
"console.log(prompt.promptMessages.map((msg) => msg.prompt.template).join(\"\\n\"));"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\u001b[32m\"Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. T\"\u001b[39m... 254 more characters"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await ragChain.invoke(\"What is task decomposition?\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Adding sources\n",
"\n",
"With LCEL, we can easily pass the retrieved documents through the chain and return them in the final response:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{\n",
" question: \u001b[32m\"What is Task Decomposition\"\u001b[39m,\n",
" context: [\n",
" Document {\n",
" pageContent: \u001b[32m\"Fig. 1. Overview of a LLM-powered autonomous agent system.\\n\"\u001b[39m +\n",
" \u001b[32m\"Component One: Planning#\\n\"\u001b[39m +\n",
" \u001b[32m\"A complicated ta\"\u001b[39m... 898 more characters,\n",
" metadata: {\n",
" source: \u001b[32m\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\u001b[39m,\n",
" loc: { lines: \u001b[36m[Object]\u001b[39m }\n",
" }\n",
" },\n",
" Document {\n",
| |
145422
|
" pageContent: \u001b[32m'Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are'\u001b[39m... 887 more characters,\n",
" metadata: {\n",
" source: \u001b[32m\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\u001b[39m,\n",
" loc: { lines: \u001b[36m[Object]\u001b[39m }\n",
" }\n",
" },\n",
" Document {\n",
" pageContent: \u001b[32m\"Agent System Overview\\n\"\u001b[39m +\n",
" \u001b[32m\" \\n\"\u001b[39m +\n",
" \u001b[32m\" Component One: Planning\\n\"\u001b[39m +\n",
" \u001b[32m\" \"\u001b[39m... 850 more characters,\n",
" metadata: {\n",
" source: \u001b[32m\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\u001b[39m,\n",
" loc: { lines: \u001b[36m[Object]\u001b[39m }\n",
" }\n",
" },\n",
" Document {\n",
" pageContent: \u001b[32m\"Resources:\\n\"\u001b[39m +\n",
" \u001b[32m\"1. Internet access for searches and information gathering.\\n\"\u001b[39m +\n",
" \u001b[32m\"2. Long Term memory management\"\u001b[39m... 456 more characters,\n",
" metadata: {\n",
" source: \u001b[32m\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\u001b[39m,\n",
" loc: { lines: \u001b[36m[Object]\u001b[39m }\n",
" }\n",
" }\n",
" ],\n",
" answer: \u001b[32m\"Task decomposition is a technique used to break down complex tasks into smaller and simpler steps fo\"\u001b[39m... 230 more characters\n",
"}"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import {\n",
" RunnableMap,\n",
" RunnablePassthrough,\n",
" RunnableSequence\n",
"} from \"@langchain/core/runnables\";\n",
"import { formatDocumentsAsString } from \"langchain/util/document\";\n",
"\n",
"const ragChainWithSources = RunnableMap.from({\n",
" // Return raw documents here for now since we want to return them at\n",
" // the end - we'll format in the next step of the chain\n",
" context: retriever,\n",
" question: new RunnablePassthrough(),\n",
"}).assign({\n",
" answer: RunnableSequence.from([\n",
" (input) => {\n",
" return {\n",
" // Now we format the documents as strings for the prompt\n",
" context: formatDocumentsAsString(input.context),\n",
" question: input.question\n",
" };\n",
" },\n",
" prompt,\n",
" llm,\n",
" new StringOutputParser()\n",
" ]),\n",
"})\n",
"\n",
"await ragChainWithSources.invoke(\"What is Task Decomposition\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check out the [LangSmith trace](https://smith.langchain.com/public/c3753531-563c-40d4-a6bf-21bfe8741d10/r) here to see the internals of the chain.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to return sources from your QA chains.\n",
"\n",
"Next, check out some of the other guides around RAG, such as [how to stream responses](/docs/how_to/qa_streaming)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Deno",
"language": "typescript",
"name": "deno"
},
"language_info": {
"file_extension": ".ts",
"mimetype": "text/x.typescript",
"name": "typescript",
"nb_converter": "script",
"pygments_lexer": "typescript",
"version": "5.3.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| |
145428
|
" documents, new OpenAIEmbeddings(),\n",
");\n",
"\n",
"const retriever = vectorstore.asRetriever({\n",
" k: 1,\n",
" searchType: \"similarity\",\n",
"});"
]
},
{
"cell_type": "markdown",
"id": "9ba737ac-43a2-4a6f-b855-5bd0305017f1",
"metadata": {},
"source": [
"We next create a pre-built [LangGraph agent](/docs/how_to/migrate_agent/) and provide it with the tool:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "c939cf2a-60e9-4afd-8b47-84d76ccb13f5",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"AGENT: AIMessage {\n",
" \"id\": \"chatcmpl-9m9RIN1GQVeXcrVdp0lNBTcZFVHb9\",\n",
" \"content\": \"\",\n",
" \"additional_kwargs\": {\n",
" \"tool_calls\": [\n",
" {\n",
" \"id\": \"call_n30LPDbegmytrj5GdUxZt9xn\",\n",
" \"type\": \"function\",\n",
" \"function\": \"[Object]\"\n",
" }\n",
" ]\n",
" },\n",
" \"response_metadata\": {\n",
" \"tokenUsage\": {\n",
" \"completionTokens\": 17,\n",
" \"promptTokens\": 52,\n",
" \"totalTokens\": 69\n",
" },\n",
" \"finish_reason\": \"tool_calls\"\n",
" },\n",
" \"tool_calls\": [\n",
" {\n",
" \"name\": \"pet_info_retriever\",\n",
" \"args\": {\n",
" \"input\": \"dogs\"\n",
" },\n",
" \"type\": \"tool_call\",\n",
" \"id\": \"call_n30LPDbegmytrj5GdUxZt9xn\"\n",
" }\n",
" ],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
" \"input_tokens\": 52,\n",
" \"output_tokens\": 17,\n",
" \"total_tokens\": 69\n",
" }\n",
"}\n",
"----\n",
"TOOLS: ToolMessage {\n",
" \"content\": \"[{\\\"pageContent\\\":\\\"Dogs are great companions, known for their loyalty and friendliness.\\\",\\\"metadata\\\":{}}]\",\n",
" \"name\": \"pet_info_retriever\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {},\n",
" \"tool_call_id\": \"call_n30LPDbegmytrj5GdUxZt9xn\"\n",
"}\n",
"----\n",
"AGENT: AIMessage {\n",
" \"id\": \"chatcmpl-9m9RJ3TT3ITfv6R0Tb7pcrNOUtnm8\",\n",
" \"content\": \"Dogs are known for being great companions, known for their loyalty and friendliness.\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {\n",
" \"tokenUsage\": {\n",
" \"completionTokens\": 18,\n",
" \"promptTokens\": 104,\n",
" \"totalTokens\": 122\n",
" },\n",
" \"finish_reason\": \"stop\"\n",
" },\n",
" \"tool_calls\": [],\n",
" \"invalid_tool_calls\": [],\n",
" \"usage_metadata\": {\n",
" \"input_tokens\": 104,\n",
" \"output_tokens\": 18,\n",
" \"total_tokens\": 122\n",
" }\n",
"}\n",
"----\n"
]
}
],
"source": [
"import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n",
"\n",
"const tools = [\n",
" retriever.asTool({\n",
" name: \"pet_info_retriever\",\n",
" description: \"Get information about pets.\",\n",
" schema: z.string(),\n",
" })\n",
"];\n",
"\n",
"const agent = createReactAgent({ llm: llm, tools });\n",
"\n",
"const stream = await agent.stream({\"messages\": [[\"human\", \"What are dogs known for?\"]]});\n",
"\n",
"for await (const chunk of stream) {\n",
" // Log output from the agent or tools node\n",
" if (chunk.agent) {\n",
" console.log(\"AGENT:\", chunk.agent.messages[0]);\n",
" } else if (chunk.tools) {\n",
" console.log(\"TOOLS:\", chunk.tools.messages[0]);\n",
" }\n",
" console.log(\"----\");\n",
"}"
]
},
{
"cell_type": "markdown",
"id": "96f2ac9c-36f4-4b7a-ae33-f517734c86aa",
"metadata": {},
"source": [
"This [LangSmith trace](https://smith.langchain.com/public/5e141617-ae82-44af-8fe0-b64dbd007826/r) shows what's going on under the hood for the above run.\n",
"\n",
"Going further, we can even create a tool from a full [RAG chain](/docs/tutorials/rag/):"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "bea518c9-c711-47c2-b8cc-dbd102f71f09",
"metadata": {},
"outputs": [],
"source": [
"import { StringOutputParser } from \"@langchain/core/output_parsers\";\n",
"import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
"import { RunnableSequence } from \"@langchain/core/runnables\";\n",
"\n",
"const SYSTEM_TEMPLATE = `\n",
"You are an assistant for question-answering tasks.\n",
"Use the below context to answer the question. If\n",
"you don't know the answer, say you don't know.\n",
"Use three sentences maximum and keep the answer\n",
"concise.\n",
"\n",
"Answer in the style of {answer_style}.\n",
"\n",
"Context: {context}`;\n",
"\n",
"const prompt = ChatPromptTemplate.fromMessages([\n",
" [\"system\", SYSTEM_TEMPLATE],\n",
" [\"human\", \"{question}\"],\n",
"]);\n",
"\n",
"const ragChain = RunnableSequence.from([\n",
" {\n",
" context: (input, config) => retriever.invoke(input.question, config),\n",
" question: (input) => input.question,\n",
" answer_style: (input) => input.answer_style,\n",
" },\n",
" prompt,\n",
" llm,\n",
" new StringOutputParser(),\n",
"]);"
]
},
{
"cell_type": "markdown",
"id": "4570615b-8f96-4d97-ae01-1c08b14be584",
"metadata": {},
"source": [
"Below we again invoke the agent. Note that the agent populates the required parameters in its `tool_calls`:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "06409913-a2ad-400f-a202-7b8dd2ef483a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"AGENT: AIMessage {\n",
| |
145433
|
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to pass callbacks in at runtime\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"\n",
":::\n",
"\n",
"In many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html) using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n",
"\n",
"This prevents us from having to manually attach the handlers to each individual nested object. Here's an example using LangChain's built-in [`ConsoleCallbackHandler`](https://api.js.langchain.com/classes/langchain_core.tracers_console.ConsoleCallbackHandler.html):"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[32m[chain/start]\u001b[39m [\u001b[90m\u001b[1m1:chain:RunnableSequence\u001b[22m\u001b[39m] Entering Chain run with input: {\n",
" \"number\": \"2\"\n",
"}\n",
"\u001b[32m[chain/start]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m2:prompt:ChatPromptTemplate\u001b[22m\u001b[39m] Entering Chain run with input: {\n",
" \"number\": \"2\"\n",
"}\n",
"\u001b[36m[chain/end]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m2:prompt:ChatPromptTemplate\u001b[22m\u001b[39m] [1ms] Exiting Chain run with output: {\n",
" \"lc\": 1,\n",
" \"type\": \"constructor\",\n",
" \"id\": [\n",
" \"langchain_core\",\n",
" \"prompt_values\",\n",
" \"ChatPromptValue\"\n",
" ],\n",
" \"kwargs\": {\n",
" \"messages\": [\n",
" {\n",
" \"lc\": 1,\n",
" \"type\": \"constructor\",\n",
" \"id\": [\n",
" \"langchain_core\",\n",
" \"messages\",\n",
" \"HumanMessage\"\n",
" ],\n",
" \"kwargs\": {\n",
" \"content\": \"What is 1 + 2?\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {}\n",
" }\n",
" }\n",
" ]\n",
" }\n",
"}\n",
"\u001b[32m[llm/start]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m3:llm:ChatAnthropic\u001b[22m\u001b[39m] Entering LLM run with input: {\n",
" \"messages\": [\n",
" [\n",
" {\n",
" \"lc\": 1,\n",
" \"type\": \"constructor\",\n",
" \"id\": [\n",
" \"langchain_core\",\n",
" \"messages\",\n",
" \"HumanMessage\"\n",
" ],\n",
" \"kwargs\": {\n",
" \"content\": \"What is 1 + 2?\",\n",
" \"additional_kwargs\": {},\n",
" \"response_metadata\": {}\n",
" }\n",
" }\n",
" ]\n",
" ]\n",
"}\n",
"\u001b[36m[llm/end]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m3:llm:ChatAnthropic\u001b[22m\u001b[39m] [766ms] Exiting LLM run with output: {\n",
" \"generations\": [\n",
" [\n",
" {\n",
" \"text\": \"1 + 2 = 3\",\n",
" \"message\": {\n",
" \"lc\": 1,\n",
" \"type\": \"constructor\",\n",
" \"id\": [\n",
" \"langchain_core\",\n",
" \"messages\",\n",
" \"AIMessage\"\n",
" ],\n",
" \"kwargs\": {\n",
" \"content\": \"1 + 2 = 3\",\n",
" \"tool_calls\": [],\n",
" \"invalid_tool_calls\": [],\n",
" \"additional_kwargs\": {\n",
" \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n",
" \"type\": \"message\",\n",
" \"role\": \"assistant\",\n",
" \"model\": \"claude-3-sonnet-20240229\",\n",
" \"stop_sequence\": null,\n",
" \"usage\": {\n",
" \"input_tokens\": 16,\n",
" \"output_tokens\": 13\n",
" },\n",
" \"stop_reason\": \"end_turn\"\n",
" },\n",
" \"response_metadata\": {\n",
" \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n",
" \"model\": \"claude-3-sonnet-20240229\",\n",
" \"stop_sequence\": null,\n",
" \"usage\": {\n",
" \"input_tokens\": 16,\n",
" \"output_tokens\": 13\n",
" },\n",
" \"stop_reason\": \"end_turn\"\n",
" }\n",
" }\n",
" }\n",
" }\n",
" ]\n",
" ],\n",
" \"llmOutput\": {\n",
" \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n",
" \"model\": \"claude-3-sonnet-20240229\",\n",
" \"stop_sequence\": null,\n",
" \"usage\": {\n",
" \"input_tokens\": 16,\n",
" \"output_tokens\": 13\n",
" },\n",
" \"stop_reason\": \"end_turn\"\n",
" }\n",
"}\n",
"\u001b[36m[chain/end]\u001b[39m [\u001b[90m\u001b[1m1:chain:RunnableSequence\u001b[22m\u001b[39m] [778ms] Exiting Chain run with output: {\n",
" \"lc\": 1,\n",
" \"type\": \"constructor\",\n",
" \"id\": [\n",
" \"langchain_core\",\n",
" \"messages\",\n",
" \"AIMessage\"\n",
" ],\n",
" \"kwargs\": {\n",
" \"content\": \"1 + 2 = 3\",\n",
" \"tool_calls\": [],\n",
" \"invalid_tool_calls\": [],\n",
" \"additional_kwargs\": {\n",
" \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n",
" \"type\": \"message\",\n",
" \"role\": \"assistant\",\n",
" \"model\": \"claude-3-sonnet-20240229\",\n",
" \"stop_sequence\": null,\n",
" \"usage\": {\n",
" \"input_tokens\": 16,\n",
" \"output_tokens\": 13\n",
" },\n",
| |
145442
|
# How to partially format prompt templates
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [Prompt templates](/docs/concepts/#prompt-templates)
:::
Like partially binding arguments to a function, it can make sense to "partial" a prompt template - e.g. pass in
a subset of the required values, so as to create a new prompt template which expects only the remaining subset of values.
LangChain supports this in two ways:
1. Partial formatting with string values.
2. Partial formatting with functions that return string values.
In the examples below, we go over the motivations for both use cases as well as how to do it in LangChain.
## Partial with strings
One common use case for wanting to partial a prompt template is if you get access to some of the variables in a
prompt before others. For example, suppose you have a prompt template that requires two variables, `foo` and `bar`.
If you get the `foo` value early on in your chain, but the `bar` value later, it can be inconvenient to pass both variables all the way through the chain.
Instead, you can partial the prompt template with the `foo` value, and then pass the partialed prompt template along and just use that.
Below is an example of doing this:
```typescript
import { PromptTemplate } from "langchain/prompts";
const prompt = new PromptTemplate({
template: "{foo}{bar}",
inputVariables: ["foo", "bar"],
});
const partialPrompt = await prompt.partial({
foo: "foo",
});
const formattedPrompt = await partialPrompt.format({
bar: "baz",
});
console.log(formattedPrompt);
// foobaz
```
You can also just initialize the prompt with the partialed variables.
```typescript
const prompt = new PromptTemplate({
template: "{foo}{bar}",
inputVariables: ["bar"],
partialVariables: {
foo: "foo",
},
});
const formattedPrompt = await prompt.format({
bar: "baz",
});
console.log(formattedPrompt);
// foobaz
```
## Partial with functions
You can also partial with a function. The use case for this is when you have a variable you know that you always want to fetch in a common way. A prime example of this is with date or time. Imagine you have a prompt which you always want to have the current date. You can't hard code it in the prompt, and passing it along with the other input variables can be tedious. In this case, it's very handy to be able to partial the prompt with a function that always returns the current date.
```typescript
const getCurrentDate = () => {
return new Date().toISOString();
};
const prompt = new PromptTemplate({
template: "Tell me a {adjective} joke about the day {date}",
inputVariables: ["adjective", "date"],
});
const partialPrompt = await prompt.partial({
date: getCurrentDate,
});
const formattedPrompt = await partialPrompt.format({
adjective: "funny",
});
console.log(formattedPrompt);
// Tell me a funny joke about the day 2023-07-13T00:54:59.287Z
```
You can also just initialize the prompt with the partialed variables:
```typescript
const prompt = new PromptTemplate({
template: "Tell me a {adjective} joke about the day {date}",
inputVariables: ["adjective"],
partialVariables: {
date: getCurrentDate,
},
});
const formattedPrompt = await prompt.format({
adjective: "funny",
});
console.log(formattedPrompt);
// Tell me a funny joke about the day 2023-07-13T00:54:59.287Z
```
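A partialed prompt template behaves like any other prompt template, so you can compose it into a chain as usual. Here's a brief sketch reusing the `partialPrompt` from the function example above (the model class is illustrative - substitute whichever model you're using):
```typescript
import { ChatOpenAI } from "@langchain/openai";
import { StringOutputParser } from "@langchain/core/output_parsers";

const model = new ChatOpenAI({ model: "gpt-4o-mini" });

// Only the remaining variable needs to be supplied at invocation time,
// since `date` is already partialed in.
const chain = partialPrompt.pipe(model).pipe(new StringOutputParser());

await chain.invoke({ adjective: "funny" });
```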
## Next steps
You've now learned how to partially apply variables to your prompt templates.
Next, check out the other how-to guides on prompt templates in this section, like [adding few-shot examples to your prompt templates](/docs/how_to/few_shot_examples_chat).
| |
145443
|
# How to generate multiple embeddings per document
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [Retrievers](/docs/concepts/#retrievers)
- [Text splitters](/docs/concepts/#text-splitters)
- [Retrieval-augmented generation (RAG)](/docs/tutorials/rag)
:::
Embedding different representations of an original document, then returning the original document when any of the representations result in a search hit, can allow you to
tune and improve your retrieval performance. LangChain has a base [`MultiVectorRetriever`](https://api.js.langchain.com/classes/langchain.retrievers_multi_vector.MultiVectorRetriever.html) designed to do just this!
A lot of the complexity lies in how to create the multiple vectors per document.
This guide covers some of the common ways to create those vectors and use the `MultiVectorRetriever`.
Some methods to create multiple vectors per document include:
- smaller chunks: split a document into smaller chunks, and embed those (e.g. the [`ParentDocumentRetriever`](/docs/how_to/parent_document_retriever))
- summary: create a summary for each document, embed that along with (or instead of) the document
- hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document
Note that this also enables another method of adding embeddings - manually. This is great because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control.
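To make the mechanism concrete before the examples below, here's a rough sketch of what the `MultiVectorRetriever` automates: embed alternative representations that carry the id of their source document, search over those representations, then map the hits back to the original documents (the id scheme and lookup map here are illustrative - the examples below use the retriever class itself):
```typescript
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "@langchain/openai";
import { Document } from "@langchain/core/documents";

const originalDocs = [
  new Document({
    pageContent: "A long report about pandas...",
    metadata: { doc_id: "1" },
  }),
];

// Keep the full documents in a simple lookup keyed by id
const docById = new Map<string, Document>();
for (const doc of originalDocs) {
  docById.set(doc.metadata.doc_id, doc);
}

// Embed alternative representations (sub-chunks, summaries, hypothetical
// questions) that point back to the original document via `doc_id`
const representations = [
  new Document({
    pageContent: "What do pandas eat?",
    metadata: { doc_id: "1" },
  }),
  new Document({
    pageContent: "Summary: pandas mostly eat bamboo.",
    metadata: { doc_id: "1" },
  }),
];

const vectorstore = await MemoryVectorStore.fromDocuments(
  representations,
  new OpenAIEmbeddings()
);

// Search over the representations, then return the deduplicated originals
const hits = await vectorstore.similaritySearch("panda diet", 2);
const ids = [...new Set(hits.map((hit) => hit.metadata.doc_id as string))];
const results = ids.map((id) => docById.get(id));
```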
## Smaller chunks
Oftentimes it can be useful to retrieve larger chunks of information, but embed smaller chunks.
This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream.
NOTE: this is what the ParentDocumentRetriever does. Here we show what is going on under the hood.
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
<IntegrationInstallTooltip></IntegrationInstallTooltip>
```bash npm2yarn
npm install @langchain/openai @langchain/community @langchain/core
```
import CodeBlock from "@theme/CodeBlock";
import SmallChunksExample from "@examples/retrievers/multi_vector_small_chunks.ts";
<CodeBlock language="typescript">{SmallChunksExample}</CodeBlock>
## Summary
Oftentimes a summary may be able to distill more accurately what a chunk is about, leading to better retrieval.
Here we show how to create summaries, and then embed those.
import SummaryExample from "@examples/retrievers/multi_vector_summary.ts";
<CodeBlock language="typescript">{SummaryExample}</CodeBlock>
## Hypothetical queries
An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document.
These questions can then be embedded and used to retrieve the original document:
import HypotheticalExample from "@examples/retrievers/multi_vector_hypothetical.ts";
<CodeBlock language="typescript">{HypotheticalExample}</CodeBlock>
## Next steps
You've now learned a few ways to generate multiple embeddings per document.
Next, check out the individual sections for deeper dives on specific retrievers, the [broader tutorial on RAG](/docs/tutorials/rag), or this section to learn how to
[create your own custom retriever over any data source](/docs/how_to/custom_retriever/).
| |
145446
|
# How to init any model in one line
import CodeBlock from "@theme/CodeBlock";
Many LLM applications let end users specify what model provider and model they want the application to be powered by.
This requires writing some logic to initialize different ChatModels based on some user configuration.
The `initChatModel()` helper method makes it easy to initialize a number of different model integrations without having to worry about import paths and class names.
Keep in mind this feature is only for chat models.
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [Chat models](/docs/concepts/#chat-models)
- [LangChain Expression Language (LCEL)](/docs/concepts#langchain-expression-language)
- [Tool calling](/docs/concepts#tools)
:::
:::caution Compatibility
**This feature is only intended to be used in Node environments. Use in non-Node environments or with bundlers is not guaranteed to work and is not officially supported.**
`initChatModel` requires `langchain>=0.2.11`. See [this guide](/docs/how_to/installation/#installing-integration-packages) for some considerations to take when upgrading.
See the [initChatModel()](https://api.js.langchain.com/functions/langchain.chat_models_universal.initChatModel.html) API reference for a full list of supported integrations.
Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `@langchain/openai` installed to init an OpenAI model.
:::
## Basic usage
import BasicExample from "@examples/models/chat/configurable/basic.ts";
<CodeBlock language="typescript">{BasicExample}</CodeBlock>
## Inferring model provider
For common and distinct model names `initChatModel()` will attempt to infer the model provider.
See the [API reference](https://api.js.langchain.com/functions/langchain.chat_models_universal.initChatModel.html) for a full list of inference behavior.
E.g. any model that starts with `gpt-3...` or `gpt-4...` will be inferred as using model provider `openai`.
import InferringProviderExample from "@examples/models/chat/configurable/inferring_model_provider.ts";
<CodeBlock language="typescript">{InferringProviderExample}</CodeBlock>
## Creating a configurable model
You can also create a runtime-configurable model by specifying `configurableFields`.
If you don't specify a `model` value, then `model` and `modelProvider` will be configurable by default.
import ConfigurableModelExample from "@examples/models/chat/configurable/configurable_model.ts";
<CodeBlock language="typescript">{ConfigurableModelExample}</CodeBlock>
### Configurable model with default values
We can create a configurable model with default model values, specify which parameters are configurable, and add prefixes to configurable params:
import ConfigurableModelWithDefaultsExample from "@examples/models/chat/configurable/configurable_model_with_defaults.ts";
<CodeBlock language="typescript">
{ConfigurableModelWithDefaultsExample}
</CodeBlock>
### Using a configurable model declaratively
We can call declarative operations like `bindTools`, `withStructuredOutput`, `withConfig`, etc. on a configurable model and chain a configurable model in the same way that we would a regularly instantiated chat model object.
import ConfigurableModelDeclarativelyExample from "@examples/models/chat/configurable/configurable_model_declaratively.ts";
<CodeBlock language="typescript">
{ConfigurableModelDeclarativelyExample}
</CodeBlock>
| |
145455
|
{
"cells": [
{
"cell_type": "raw",
"id": "04171ad7",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"keywords: [custom tool, custom tools]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "5436020b",
"metadata": {},
"source": [
"# How to create Tools\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [LangChain tools](/docs/concepts#tools)\n",
"- [Agents](/docs/concepts/#agents)\n",
"\n",
":::\n",
"\n",
"When constructing your own agent, you will need to provide it with a list of Tools that it can use. While LangChain includes some prebuilt tools, it can often be more useful to use tools that use custom logic. This guide will walk you through some ways you can create custom tools.\n",
"\n",
"The biggest difference here is that the first function requires an object with multiple input fields, while the second one only accepts an object with a single field. Some older agents only work with functions that require single inputs, so it's important to understand the distinction.\n",
"\n",
"LangChain has a handful of ways to construct tools for different applications. Below I'll show the two most common ways to create tools, and where you might use each."
]
},
{
"cell_type": "markdown",
"id": "82bb159d",
"metadata": {},
"source": [
"## Tool schema\n",
"\n",
"```{=mdx}\n",
":::caution Compatibility\n",
"Only available in `@langchain/core` version 0.2.19 and above.\n",
":::\n",
"```\n",
"\n",
"The simplest way to create a tool is through the [`StructuredToolParams`](https://api.js.langchain.com/interfaces/_langchain_core.tools.StructuredToolParams.html) schema. Every chat model which supports tool calling in LangChain accepts binding tools to the model through this schema. This schema has only three fields\n",
"\n",
"- `name` - The name of the tool.\n",
"- `schema` - The schema of the tool, defined with a Zod object.\n",
"- `description` (optional) - A description of the tool.\n",
"\n",
"This schema does not include a function to pair with the tool, and for this reason it should only be used in situations where the generated output does not need to be passed as the input argument to a function."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4d129789",
"metadata": {},
"outputs": [],
"source": [
"import { z } from \"zod\";\n",
"import { StructuredToolParams } from \"@langchain/core/tools\";\n",
"\n",
"const simpleToolSchema: StructuredToolParams = {\n",
" name: \"get_current_weather\",\n",
" description: \"Get the current weather for a location\",\n",
" schema: z.object({\n",
" city: z.string().describe(\"The city to get the weather for\"),\n",
" state: z.string().optional().describe(\"The state to get the weather for\"),\n",
" })\n",
"}"
]
},
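  {
   "cell_type": "markdown",
   "id": "0e2d3f18",
   "metadata": {},
   "source": [
    "For example, a schema-only tool like this can be bound to a tool calling chat model. The sketch below is illustrative - substitute whichever tool calling model you're using:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3b7c41aa",
   "metadata": {},
   "outputs": [],
   "source": [
    "import { ChatOpenAI } from \"@langchain/openai\";\n",
    "\n",
    "const weatherLlm = new ChatOpenAI({ model: \"gpt-4o\" });\n",
    "\n",
    "// Bind the schema-only tool so the model can emit matching tool calls\n",
    "const llmWithTools = weatherLlm.bindTools([simpleToolSchema]);\n",
    "\n",
    "const aiMsg = await llmWithTools.invoke(\"What is the weather in San Francisco?\");\n",
    "console.log(aiMsg.tool_calls);"
   ]
  },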
{
"cell_type": "markdown",
"id": "f6ec6ee8",
"metadata": {},
"source": [
"## `tool` function\n",
"\n",
"```{=mdx}\n",
":::caution Compatibility\n",
"Only available in `@langchain/core` version 0.2.7 and above.\n",
":::\n",
"```\n",
"\n",
"The [`tool`](https://api.js.langchain.com/classes/langchain_core.tools.Tool.html) wrapper function is a convenience method for turning a JavaScript function into a tool. It requires the function itself along with some additional arguments that define your tool. You should use this over `StructuredToolParams` tools when the resulting tool call executes a function. The most important are:\n",
"\n",
"- The tool's `name`, which the LLM will use as context as well as to reference the tool\n",
"- An optional, but recommended `description`, which the LLM will use as context to know when to use the tool\n",
"- A `schema`, which defines the shape of the tool's input\n",
"\n",
"The `tool` function will return an instance of the [`StructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.StructuredTool.html) class, so it is compatible with all the existing tool calling infrastructure in the LangChain library."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "ecc1ce9d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\u001b[32m\"The sum of 1 and 2 is 3\"\u001b[39m"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import { z } from \"zod\";\n",
"import { tool } from \"@langchain/core/tools\";\n",
"\n",
"const adderSchema = z.object({\n",
" a: z.number(),\n",
" b: z.number(),\n",
"});\n",
"const adderTool = tool(async (input): Promise<string> => {\n",
" const sum = input.a + input.b;\n",
" return `The sum of ${input.a} and ${input.b} is ${sum}`;\n",
"}, {\n",
" name: \"adder\",\n",
" description: \"Adds two numbers together\",\n",
" schema: adderSchema,\n",
"});\n",
"\n",
"await adderTool.invoke({ a: 1, b: 2 });"
]
},
{
"cell_type": "markdown",
"id": "213ee344",
"metadata": {},
"source": [
"## `DynamicStructuredTool`\n",
"\n",
"You can also use the [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicStructuredTool.html) class to declare tools. Here's an example - note that tools must always return strings!"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "833dda4a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\u001b[32m\"72\"\u001b[39m"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import { DynamicStructuredTool } from \"@langchain/core/tools\";\n",
"import { z } from \"zod\";\n",
"\n",
"const multiplyTool = new DynamicStructuredTool({\n",
" name: \"multiply\",\n",
" description: \"multiply two numbers together\",\n",
" schema: z.object({\n",
" a: z.number().describe(\"the first number to multiply\"),\n",
" b: z.number().describe(\"the second number to multiply\"),\n",
" }),\n",
" func: async ({ a, b }: { a: number; b: number; }) => {\n",
" return (a * b).toString();\n",
" },\n",
"});\n",
"\n",
"await multiplyTool.invoke({ a: 8, b: 9, });"
]
},
{
"cell_type": "markdown",
"id": "c7326b23",
"metadata": {},
"source": [
"## `DynamicTool`\n",
"\n",
"For older agents that require tools which accept only a single input, you can pass the relevant parameters to the [`DynamicTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicTool.html) class. This is useful when working with older agents that only support tools that accept a single input. In this case, no schema is required:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "b0ce7de8",
"metadata": {},
"outputs": [
{
| |
145469
|
{
"cells": [
{
"cell_type": "raw",
"id": "beba2e0e",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 2\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "bb0735c0",
"metadata": {},
"source": [
"# How to use few shot examples in chat models\n",
"\n",
"This guide covers how to prompt a chat model with example inputs and outputs. Providing the model with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n",
"\n",
"There does not appear to be solid consensus on how best to do few-shot prompting, and the optimal prompt compilation will likely vary by model. Because of this, we provide few-shot prompt templates like the [FewShotChatMessagePromptTemplate](https://api.js.langchain.com/classes/langchain_core.prompts.FewShotChatMessagePromptTemplate.html) as a flexible starting point, and you can modify or replace them as you see fit.\n",
"\n",
"The goal of few-shot prompt templates are to dynamically select examples based on an input, and then format the examples in a final prompt to provide for the model.\n",
"\n",
"**Note:** The following code examples are for chat models only, since `FewShotChatMessagePromptTemplates` are designed to output formatted [chat messages](/docs/concepts/#message-types) rather than pure strings. For similar few-shot prompt examples for pure string templates compatible with completion models (LLMs), see the [few-shot prompt templates](/docs/how_to/few_shot_examples/) guide.\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Prompt templates](/docs/concepts/#prompt-templates)\n",
"- [Example selectors](/docs/concepts/#example-selectors)\n",
"- [Chat models](/docs/concepts/#chat-model)\n",
"- [Vectorstores](/docs/concepts/#vectorstores)\n",
"\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "d716f2de-cc29-4823-9360-a808c7bfdb86",
"metadata": {
"tags": []
},
"source": [
"## Fixed Examples\n",
"\n",
"The most basic (and common) few-shot prompting technique is to use fixed prompt examples. This way you can select a chain, evaluate it, and avoid worrying about additional moving parts in production.\n",
"\n",
"The basic components of the template are:\n",
"- `examples`: An array of object examples to include in the final prompt.\n",
"- `examplePrompt`: converts each example into 1 or more messages through its [`formatMessages`](https://api.js.langchain.com/classes/langchain_core.prompts.FewShotChatMessagePromptTemplate.html#formatMessages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n",
"\n",
"Below is a simple demonstration. First, define the examples you'd like to include:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "0fc5a02a-6249-4e92-95c3-30fff9671e8b",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import {\n",
" ChatPromptTemplate,\n",
" FewShotChatMessagePromptTemplate,\n",
"} from \"@langchain/core/prompts\"\n",
"\n",
"const examples = [\n",
" { input: \"2+2\", output: \"4\" },\n",
" { input: \"2+3\", output: \"5\" },\n",
"]"
]
},
{
"cell_type": "markdown",
"id": "e8710ecc-2aa0-4172-a74c-250f6bc3d9e2",
"metadata": {},
"source": [
"Next, assemble them into the few-shot prompt template."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "65e72ad1-9060-47d0-91a1-bc130c8b98ac",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[\n",
" HumanMessage {\n",
" lc_serializable: true,\n",
" lc_kwargs: { content: \"2+2\", additional_kwargs: {}, response_metadata: {} },\n",
" lc_namespace: [ \"langchain_core\", \"messages\" ],\n",
" content: \"2+2\",\n",
" name: undefined,\n",
" additional_kwargs: {},\n",
" response_metadata: {}\n",
" },\n",
" AIMessage {\n",
" lc_serializable: true,\n",
" lc_kwargs: {\n",
" content: \"4\",\n",
" tool_calls: [],\n",
" invalid_tool_calls: [],\n",
" additional_kwargs: {},\n",
" response_metadata: {}\n",
" },\n",
" lc_namespace: [ \"langchain_core\", \"messages\" ],\n",
" content: \"4\",\n",
" name: undefined,\n",
" additional_kwargs: {},\n",
" response_metadata: {},\n",
" tool_calls: [],\n",
" invalid_tool_calls: []\n",
" },\n",
" HumanMessage {\n",
" lc_serializable: true,\n",
" lc_kwargs: { content: \"2+3\", additional_kwargs: {}, response_metadata: {} },\n",
" lc_namespace: [ \"langchain_core\", \"messages\" ],\n",
" content: \"2+3\",\n",
" name: undefined,\n",
" additional_kwargs: {},\n",
" response_metadata: {}\n",
" },\n",
" AIMessage {\n",
" lc_serializable: true,\n",
" lc_kwargs: {\n",
" content: \"5\",\n",
" tool_calls: [],\n",
" invalid_tool_calls: [],\n",
" additional_kwargs: {},\n",
" response_metadata: {}\n",
" },\n",
" lc_namespace: [ \"langchain_core\", \"messages\" ],\n",
" content: \"5\",\n",
" name: undefined,\n",
" additional_kwargs: {},\n",
" response_metadata: {},\n",
" tool_calls: [],\n",
" invalid_tool_calls: []\n",
" }\n",
"]\n"
]
}
],
"source": [
"// This is a prompt template used to format each individual example.\n",
"const examplePrompt = ChatPromptTemplate.fromMessages(\n",
" [\n",
" [\"human\", \"{input}\"],\n",
" [\"ai\", \"{output}\"],\n",
" ]\n",
")\n",
"const fewShotPrompt = new FewShotChatMessagePromptTemplate({\n",
" examplePrompt,\n",
" examples,\n",
" inputVariables: [], // no input variables\n",
"})\n",
"\n",
"const result = await fewShotPrompt.invoke({});\n",
"console.log(result.toChatMessages())"
]
},
{
"cell_type": "markdown",
"id": "5490bd59-b28f-46a4-bbdf-0191802dd3c5",
"metadata": {},
"source": [
"Finally, we assemble the final prompt as shown below, passing `fewShotPrompt` directly into the `fromMessages` factory method, and use it with a model:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "9f86d6d9-50de-41b6-b6c7-0f9980cc0187",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"const finalPrompt = ChatPromptTemplate.fromMessages(\n",
" [\n",
" [\"system\", \"You are a wondrous wizard of math.\"],\n",
" fewShotPrompt,\n",
| |
145475
|
# How to invoke runnables in parallel
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)
- [Chaining runnables](/docs/how_to/sequence/)
:::
The [`RunnableParallel`](https://api.js.langchain.com/classes/langchain_core.runnables.RunnableParallel.html) (also known as a `RunnableMap`) primitive is an object whose values are runnables (or things that can be coerced to runnables, like functions).
It runs all of its values in parallel, and each value is called with the initial input to the `RunnableParallel`. The final return value is an object with the results of each value under its appropriate key.
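For instance, a minimal, hedged sketch of a `RunnableParallel` (the keys, functions, and input below are purely illustrative) might look like this:

```typescript
import { RunnableLambda, RunnableParallel } from "@langchain/core/runnables";

// Two runnables that each receive the same input string.
const lengthRunnable = RunnableLambda.from((text: string) => text.length);
const upperRunnable = RunnableLambda.from((text: string) => text.toUpperCase());

const mapChain = RunnableParallel.from({
  length: lengthRunnable,
  upper: upperRunnable,
});

// Both branches run in parallel over the same input; the result keeps the keys:
// { length: 5, upper: "HELLO" }
await mapChain.invoke("hello");
```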
## Formatting with `RunnableParallels`
`RunnableParallels` are useful for parallelizing operations, but can also be useful for manipulating the output of one Runnable to match the input format of the next Runnable in a sequence. You can use them to split or fork the chain so that multiple components can process the input in parallel. Later, other components can join or merge the results to synthesize a final response. This type of chain creates a computation graph that looks like the following:
```text
Input
/ \
/ \
Branch1 Branch2
\ /
\ /
Combine
```
Below, the input to each chain in the `RunnableParallel` is expected to be an object with a key for `"topic"`.
We can satisfy that requirement by invoking our chain with an object matching that structure.
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
<IntegrationInstallTooltip></IntegrationInstallTooltip>
```bash npm2yarn
npm install @langchain/anthropic @langchain/cohere @langchain/core
```
import CodeBlock from "@theme/CodeBlock";
import BasicExample from "@examples/guides/expression_language/runnable_maps_basic.ts";
<CodeBlock language="typescript">{BasicExample}</CodeBlock>
## Manipulating outputs/inputs
Maps can be useful for manipulating the output of one Runnable to match the input format of the next Runnable in a sequence.
Note below that the object within the `RunnableSequence.from()` call is automatically coerced into a runnable map. All keys of the object must
have values that are runnables or that can themselves be coerced to runnables (functions to `RunnableLambda`s or objects to `RunnableMap`s).
This coercion will also occur when composing chains via the `.pipe()` method.
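As a small standalone sketch of this coercion (separate from the retrieval example imported below; the names and values here are only illustrative):

```typescript
import {
  RunnablePassthrough,
  RunnableSequence,
} from "@langchain/core/runnables";

// The plain object in the first step is coerced into a runnable map,
// and the plain functions into `RunnableLambda`s.
const chain = RunnableSequence.from([
  {
    original: new RunnablePassthrough<number>(),
    doubled: (input: number) => input * 2,
  },
  (mapped: { original: number; doubled: number }) =>
    `${mapped.original} doubled is ${mapped.doubled}`,
]);

await chain.invoke(3); // "3 doubled is 6"
```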
import SequenceExample from "@examples/guides/expression_language/runnable_maps_sequence.ts";
<CodeBlock language="typescript">{SequenceExample}</CodeBlock>
Here the input to the prompt is expected to be a map with keys "context" and "question". The user input is just the question. So we need to get the context using our retriever and pass through the user input under the "question" key.
## Next steps
You now know some ways to format and parallelize chain steps with `RunnableParallel`.
Next, you might be interested in [using custom logic](/docs/how_to/functions/) in your chains.
| |
145476
|
{
"cells": [
{
"cell_type": "markdown",
"id": "72b1b316",
"metadata": {},
"source": [
"# How to parse JSON output\n",
"\n",
"While some model providers support [built-in ways to return structured output](/docs/how_to/structured_output), not all do. We can use an output parser to help users to specify an arbitrary JSON schema via the prompt, query a model for outputs that conform to that schema, and finally parse that schema as JSON.\n",
"\n",
":::{.callout-note}\n",
"Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed JSON.\n",
":::\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [Output parsers](/docs/concepts/#output-parsers)\n",
"- [Prompt templates](/docs/concepts/#prompt-templates)\n",
"- [Structured output](/docs/how_to/structured_output)\n",
"- [Chaining runnables together](/docs/how_to/sequence/)\n",
"\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "ae909b7a",
"metadata": {},
"source": [
"The [`JsonOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html) is one built-in option for prompting for and then parsing JSON output."
]
},
{
"cell_type": "markdown",
"id": "6c667607",
"metadata": {},
"source": [
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs />\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "4ccf45a3",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{\n",
" setup: \u001b[32m\"Why don't scientists trust atoms?\"\u001b[39m,\n",
" punchline: \u001b[32m\"Because they make up everything!\"\u001b[39m\n",
"}"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import { ChatOpenAI } from \"@langchain/openai\";\n",
"const model = new ChatOpenAI({\n",
" model: \"gpt-4o\",\n",
" temperature: 0,\n",
"})\n",
"\n",
"import { JsonOutputParser } from \"@langchain/core/output_parsers\"\n",
"import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n",
"\n",
"// Define your desired data structure. Only used for typing the parser output.\n",
"interface Joke {\n",
" setup: string\n",
" punchline: string\n",
"}\n",
"\n",
"// A query and format instructions used to prompt a language model.\n",
"const jokeQuery = \"Tell me a joke.\";\n",
"const formatInstructions = \"Respond with a valid JSON object, containing two fields: 'setup' and 'punchline'.\"\n",
"\n",
"// Set up a parser + inject instructions into the prompt template.\n",
"const parser = new JsonOutputParser<Joke>()\n",
"\n",
"const prompt = ChatPromptTemplate.fromTemplate(\n",
" \"Answer the user query.\\n{format_instructions}\\n{query}\\n\"\n",
");\n",
"\n",
"const partialedPrompt = await prompt.partial({\n",
" format_instructions: formatInstructions\n",
"});\n",
"\n",
"const chain = partialedPrompt.pipe(model).pipe(parser);\n",
"\n",
"await chain.invoke({ query: jokeQuery });"
]
},
{
"cell_type": "markdown",
"id": "37d801be",
"metadata": {},
"source": [
"## Streaming\n",
"\n",
"The `JsonOutputParser` also supports streaming partial chunks. This is useful when the model returns partial JSON output in multiple chunks. The parser will keep track of the partial chunks and return the final JSON output when the model finishes generating the output."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "0309256d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{}\n",
"{ setup: \"\" }\n",
"{ setup: \"Why\" }\n",
"{ setup: \"Why don't\" }\n",
"{ setup: \"Why don't scientists\" }\n",
"{ setup: \"Why don't scientists trust\" }\n",
"{ setup: \"Why don't scientists trust atoms\" }\n",
"{ setup: \"Why don't scientists trust atoms?\", punchline: \"\" }\n",
"{ setup: \"Why don't scientists trust atoms?\", punchline: \"Because\" }\n",
"{\n",
" setup: \"Why don't scientists trust atoms?\",\n",
" punchline: \"Because they\"\n",
"}\n",
"{\n",
" setup: \"Why don't scientists trust atoms?\",\n",
" punchline: \"Because they make\"\n",
"}\n",
"{\n",
" setup: \"Why don't scientists trust atoms?\",\n",
" punchline: \"Because they make up\"\n",
"}\n",
"{\n",
" setup: \"Why don't scientists trust atoms?\",\n",
" punchline: \"Because they make up everything\"\n",
"}\n",
"{\n",
" setup: \"Why don't scientists trust atoms?\",\n",
" punchline: \"Because they make up everything!\"\n",
"}\n"
]
}
],
"source": [
"for await (const s of await chain.stream({ query: jokeQuery })) {\n",
" console.log(s)\n",
"}"
]
},
{
"cell_type": "markdown",
"id": "1eefe12b",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You've now learned one way to prompt a model to return structured JSON. Next, check out the [broader guide on obtaining structured output](/docs/how_to/structured_output) for other techniques."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Deno",
"language": "typescript",
"name": "deno"
},
"language_info": {
"file_extension": ".ts",
"mimetype": "text/x.typescript",
"name": "typescript",
"nb_converter": "script",
"pygments_lexer": "typescript",
"version": "5.3.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| |
145477
|
---
keywords: [similaritySearchWithScore]
---
# How to create and query vector stores
:::info
Head to [Integrations](/docs/integrations/vectorstores) for documentation on built-in integrations with vectorstore providers.
:::
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [Vector stores](/docs/concepts/#vectorstores)
- [Embeddings](/docs/concepts/#embedding-models)
- [Document loaders](/docs/concepts#document-loaders)
:::
One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding
vectors, and then at query time to embed the unstructured query and retrieve the embedding vectors that are
'most similar' to the embedded query. A vector store takes care of storing embedded data and performing vector search
for you.
This walkthrough uses a basic, unoptimized implementation called [`MemoryVectorStore`](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html) that stores embeddings in-memory and does an exact, linear search for the most similar embeddings.
LangChain contains many built-in integrations - see [this section](/docs/how_to/vectorstores/#which-one-to-pick) for more, or the [full list of integrations](/docs/integrations/vectorstores/).
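For a quick sense of the basic API, here is a minimal sketch using `MemoryVectorStore` (it assumes `@langchain/openai` is installed and an OpenAI API key is configured; the texts and metadata are illustrative):

```typescript
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "@langchain/openai";

// Embed and index a few text chunks, then run a similarity search over them.
const vectorStore = await MemoryVectorStore.fromTexts(
  ["Hello world", "Bye bye", "hello nice world"],
  [{ id: 1 }, { id: 2 }, { id: 3 }],
  new OpenAIEmbeddings()
);

const results = await vectorStore.similaritySearch("hello world", 2);
console.log(results);
```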
## Creating a new index
Most of the time, you'll need to load and prepare the data you want to search over. Here's an example that loads a recent speech from a file:
import ExampleLoader from "@examples/indexes/vector_stores/memory_fromdocs.ts";
<CodeBlock language="typescript">{ExampleLoader}</CodeBlock>
Most of the time, you'll need to split the loaded text as a preparation step. See [this section](/docs/concepts/#text-splitters) to learn more about text splitters.
## Creating a new index from texts
If you have already prepared the data you want to search over, you can initialize a vector store directly from text chunks:
import CodeBlock from "@theme/CodeBlock";
import ExampleTexts from "@examples/indexes/vector_stores/memory.ts";
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
<IntegrationInstallTooltip></IntegrationInstallTooltip>
```bash npm2yarn
npm install @langchain/openai @langchain/core
```
<CodeBlock language="typescript">{ExampleTexts}</CodeBlock>
## Which one to pick?
Here's a quick guide to help you pick the right vector store for your use case:
- If you're after something that can just run inside your Node.js application, in-memory, without any other servers to stand up, then go for [HNSWLib](/docs/integrations/vectorstores/hnswlib), [Faiss](/docs/integrations/vectorstores/faiss), [LanceDB](/docs/integrations/vectorstores/lancedb) or [CloseVector](/docs/integrations/vectorstores/closevector)
- If you're looking for something that can run in-memory in browser-like environments, then go for [MemoryVectorStore](/docs/integrations/vectorstores/memory) or [CloseVector](/docs/integrations/vectorstores/closevector)
- If you come from Python and you were looking for something similar to FAISS, try [HNSWLib](/docs/integrations/vectorstores/hnswlib) or [Faiss](/docs/integrations/vectorstores/faiss)
- If you're looking for an open-source full-featured vector database that you can run locally in a docker container, then go for [Chroma](/docs/integrations/vectorstores/chroma)
- If you're looking for an open-source vector database that offers low-latency, local embedding of documents and supports apps on the edge, then go for [Zep](/docs/integrations/vectorstores/zep)
- If you're looking for an open-source production-ready vector database that you can run locally (in a docker container) or hosted in the cloud, then go for [Weaviate](/docs/integrations/vectorstores/weaviate).
- If you're using Supabase already then look at the [Supabase](/docs/integrations/vectorstores/supabase) vector store to use the same Postgres database for your embeddings too
- If you're looking for a production-ready vector store you don't have to worry about hosting yourself, then go for [Pinecone](/docs/integrations/vectorstores/pinecone)
- If you are already utilizing SingleStore, or if you find yourself in need of a distributed, high-performance database, you might want to consider the [SingleStore](/docs/integrations/vectorstores/singlestore) vector store.
- If you are looking for an online MPP (Massively Parallel Processing) data warehousing service, you might want to consider the [AnalyticDB](/docs/integrations/vectorstores/analyticdb) vector store.
- If you're in search of a cost-effective vector database that allows you to run vector search with SQL, look no further than [MyScale](/docs/integrations/vectorstores/myscale).
- If you're in search of a vector database that you can load from both the browser and server side, check out [CloseVector](/docs/integrations/vectorstores/closevector). It's a vector database that aims to be cross-platform.
- If you're looking for a scalable, open-source columnar database with excellent performance for analytical queries, then consider [ClickHouse](/docs/integrations/vectorstores/clickhouse).
## Next steps
You've now learned how to load data into a vectorstore.
Next, check out the [full tutorial on retrieval-augmented generation](/docs/tutorials/rag).
| |
145479
|
"data": {
"text/plain": [
"{\n",
" setup: \u001b[32m\"Why don't cats play poker in the jungle?\"\u001b[39m,\n",
" punchline: \u001b[32m\"Too many cheetahs!\"\u001b[39m\n",
"}"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"const structuredLlm = model.withStructuredOutput(joke, {\n",
" method: \"json_mode\",\n",
" name: \"joke\",\n",
"})\n",
"\n",
"await structuredLlm.invoke(\n",
" \"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys\"\n",
")"
]
},
{
"cell_type": "markdown",
"id": "56278a82",
"metadata": {},
"source": [
"In the above example, we use OpenAI's alternate JSON mode capability along with a more specific prompt.\n",
"\n",
"For specifics about the model you choose, peruse its entry in the [API reference pages](https://api.js.langchain.com/).\n",
"\n",
"### (Advanced) Raw outputs\n",
"\n",
"LLMs aren't perfect at generating structured output, especially as schemas become complex. You can avoid raising exceptions and handle the raw output yourself by passing `includeRaw: true`. This changes the output format to contain the raw message output and the `parsed` value (if successful):"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "46b616a4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{\n",
" raw: AIMessage {\n",
" lc_serializable: \u001b[33mtrue\u001b[39m,\n",
" lc_kwargs: {\n",
" content: \u001b[32m\"\"\u001b[39m,\n",
" tool_calls: [\n",
" {\n",
" name: \u001b[32m\"joke\"\u001b[39m,\n",
" args: \u001b[36m[Object]\u001b[39m,\n",
" id: \u001b[32m\"call_0pEdltlfSXjq20RaBFKSQOeF\"\u001b[39m\n",
" }\n",
" ],\n",
" invalid_tool_calls: [],\n",
" additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: [ \u001b[36m[Object]\u001b[39m ] },\n",
" response_metadata: {}\n",
" },\n",
" lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n",
" content: \u001b[32m\"\"\u001b[39m,\n",
" name: \u001b[90mundefined\u001b[39m,\n",
" additional_kwargs: {\n",
" function_call: \u001b[90mundefined\u001b[39m,\n",
" tool_calls: [\n",
" {\n",
" id: \u001b[32m\"call_0pEdltlfSXjq20RaBFKSQOeF\"\u001b[39m,\n",
" type: \u001b[32m\"function\"\u001b[39m,\n",
" function: \u001b[36m[Object]\u001b[39m\n",
" }\n",
" ]\n",
" },\n",
" response_metadata: {\n",
" tokenUsage: { completionTokens: \u001b[33m33\u001b[39m, promptTokens: \u001b[33m88\u001b[39m, totalTokens: \u001b[33m121\u001b[39m },\n",
" finish_reason: \u001b[32m\"stop\"\u001b[39m\n",
" },\n",
" tool_calls: [\n",
" {\n",
" name: \u001b[32m\"joke\"\u001b[39m,\n",
" args: {\n",
" setup: \u001b[32m\"Why was the cat sitting on the computer?\"\u001b[39m,\n",
" punchline: \u001b[32m\"Because it wanted to keep an eye on the mouse!\"\u001b[39m,\n",
" rating: \u001b[33m7\u001b[39m\n",
" },\n",
" id: \u001b[32m\"call_0pEdltlfSXjq20RaBFKSQOeF\"\u001b[39m\n",
" }\n",
" ],\n",
" invalid_tool_calls: [],\n",
" usage_metadata: { input_tokens: \u001b[33m88\u001b[39m, output_tokens: \u001b[33m33\u001b[39m, total_tokens: \u001b[33m121\u001b[39m }\n",
" },\n",
" parsed: {\n",
" setup: \u001b[32m\"Why was the cat sitting on the computer?\"\u001b[39m,\n",
" punchline: \u001b[32m\"Because it wanted to keep an eye on the mouse!\"\u001b[39m,\n",
" rating: \u001b[33m7\u001b[39m\n",
" }\n",
"}"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"const joke = z.object({\n",
" setup: z.string().describe(\"The setup of the joke\"),\n",
" punchline: z.string().describe(\"The punchline to the joke\"),\n",
" rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\"),\n",
"});\n",
"\n",
"const structuredLlm = model.withStructuredOutput(joke, { includeRaw: true, name: \"joke\" });\n",
"\n",
"await structuredLlm.invoke(\"Tell me a joke about cats\");"
]
},
{
"cell_type": "markdown",
"id": "5e92a98a",
"metadata": {},
"source": [
"## Prompting techniques\n",
"\n",
"You can also prompt models to outputting information in a given format. This approach relies on designing good prompts and then parsing the output of the models. This is the only option for models that don't support `.with_structured_output()` or other built-in approaches.\n",
"\n",
"### Using `JsonOutputParser`\n",
"\n",
"The following example uses the built-in [`JsonOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html) to parse the output of a chat model prompted to match a the given JSON schema. Note that we are adding `format_instructions` directly to the prompt from a method on the parser:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "6e514455",
"metadata": {},
"outputs": [],
"source": [
"import { JsonOutputParser } from \"@langchain/core/output_parsers\";\n",
"import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
"\n",
"type Person = {\n",
" name: string;\n",
" height_in_meters: number;\n",
"};\n",
"\n",
"type People = {\n",
" people: Person[];\n",
"};\n",
"\n",
"const formatInstructions = `Respond only in valid JSON. The JSON object you return should match the following schema:\n",
"{{ people: [{{ name: \"string\", height_in_meters: \"number\" }}] }}\n",
"\n",
| |
145480
|
"Where people is an array of objects, each with a name and height_in_meters field.\n",
"`\n",
"\n",
"// Set up a parser\n",
"const parser = new JsonOutputParser<People>();\n",
"\n",
"// Prompt\n",
"const prompt = await ChatPromptTemplate.fromMessages(\n",
" [\n",
" [\n",
" \"system\",\n",
" \"Answer the user query. Wrap the output in `json` tags\\n{format_instructions}\",\n",
" ],\n",
" [\n",
" \"human\",\n",
" \"{query}\",\n",
" ]\n",
" ]\n",
").partial({\n",
" format_instructions: formatInstructions,\n",
"})"
]
},
{
"cell_type": "markdown",
"id": "082fa166",
"metadata": {},
"source": [
"Let’s take a look at what information is sent to the model:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "3d73d33d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"System: Answer the user query. Wrap the output in `json` tags\n",
"Respond only in valid JSON. The JSON object you return should match the following schema:\n",
"{{ people: [{{ name: \"string\", height_in_meters: \"number\" }}] }}\n",
"\n",
"Where people is an array of objects, each with a name and height_in_meters field.\n",
"\n",
"Human: Anna is 23 years old and she is 6 feet tall\n"
]
}
],
"source": [
"const query = \"Anna is 23 years old and she is 6 feet tall\"\n",
"\n",
"console.log((await prompt.format({ query })).toString())"
]
},
{
"cell_type": "markdown",
"id": "081956b9",
"metadata": {},
"source": [
"And now let's invoke it:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "8d6b3d17",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{ people: [ { name: \u001b[32m\"Anna\"\u001b[39m, height_in_meters: \u001b[33m1.83\u001b[39m } ] }"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"const chain = prompt.pipe(model).pipe(parser);\n",
"\n",
"await chain.invoke({ query })"
]
},
{
"cell_type": "markdown",
"id": "6732dd87",
"metadata": {},
"source": [
"For a deeper dive into using output parsers with prompting techniques for structured output, see [this guide](/docs/how_to/output_parser_structured).\n",
"\n",
"### Custom Parsing\n",
"\n",
"You can also create a custom prompt and parser with [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language), using a plain function to parse the output from the model:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "525721b3",
"metadata": {},
"outputs": [],
"source": [
"import { AIMessage } from \"@langchain/core/messages\";\n",
"import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
"\n",
"type Person = {\n",
" name: string;\n",
" height_in_meters: number;\n",
"};\n",
"\n",
"type People = {\n",
" people: Person[];\n",
"};\n",
"\n",
"const schema = `{{ people: [{{ name: \"string\", height_in_meters: \"number\" }}] }}`\n",
"\n",
"// Prompt\n",
"const prompt = await ChatPromptTemplate.fromMessages(\n",
" [\n",
" [\n",
" \"system\",\n",
" `Answer the user query. Output your answer as JSON that\n",
"matches the given schema: \\`\\`\\`json\\n{schema}\\n\\`\\`\\`.\n",
"Make sure to wrap the answer in \\`\\`\\`json and \\`\\`\\` tags`\n",
" ],\n",
" [\n",
" \"human\",\n",
" \"{query}\",\n",
" ]\n",
" ]\n",
").partial({\n",
" schema\n",
"});\n",
"\n",
"/**\n",
" * Custom extractor\n",
" * \n",
" * Extracts JSON content from a string where\n",
" * JSON is embedded between ```json and ``` tags.\n",
" */\n",
"const extractJson = (output: AIMessage): Array<People> => {\n",
" const text = output.content as string;\n",
" // Define the regular expression pattern to match JSON blocks\n",
" const pattern = /```json(.*?)```/gs;\n",
"\n",
" // Find all non-overlapping matches of the pattern in the string\n",
" const matches = text.match(pattern);\n",
"\n",
" // Process each match, attempting to parse it as JSON\n",
" try {\n",
" return matches?.map(match => {\n",
" // Remove the markdown code block syntax to isolate the JSON string\n",
" const jsonStr = match.replace(/```json|```/g, '').trim();\n",
" return JSON.parse(jsonStr);\n",
" }) ?? [];\n",
" } catch (error) {\n",
" throw new Error(`Failed to parse: ${output}`);\n",
" }\n",
"}"
]
},
{
"cell_type": "markdown",
"id": "9f1bc8f7",
"metadata": {},
"source": [
"Here is the prompt sent to the model:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "c8a30d0e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"System: Answer the user query. Output your answer as JSON that\n",
"matches the given schema: ```json\n",
"{{ people: [{{ name: \"string\", height_in_meters: \"number\" }}] }}\n",
"```.\n",
"Make sure to wrap the answer in ```json and ``` tags\n",
"Human: Anna is 23 years old and she is 6 feet tall\n"
]
}
],
"source": [
"const query = \"Anna is 23 years old and she is 6 feet tall\"\n",
"\n",
"console.log((await prompt.format({ query })).toString())"
]
},
{
"cell_type": "markdown",
"id": "ec018893",
"metadata": {},
"source": [
"And here's what it looks like when we invoke it:"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "e1e7baf6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[\n",
" { people: [ { name: \u001b[32m\"Anna\"\u001b[39m, height_in_meters: \u001b[33m1.83\u001b[39m } ] }\n",
"]"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import { RunnableLambda } from \"@langchain/core/runnables\";\n",
"\n",
"const chain = prompt.pipe(model).pipe(new RunnableLambda({ func: extractJson }));\n",
"\n",
"await chain.invoke({ query })"
]
},
{
"cell_type": "markdown",
"id": "7a39221a",
"metadata": {},
"source": [
| |
145482
|
# How to load JSON data
> [JSON (JavaScript Object Notation)](https://en.wikipedia.org/wiki/JSON) is an open standard file format and data interchange format that uses human-readable text to store and transmit data objects consisting of attribute–value pairs and arrays (or other serializable values).
> [JSON Lines](https://jsonlines.org/) is a file format where each line is a valid JSON value.
The JSON loader uses [JSON pointer](https://github.com/janl/node-jsonpointer) to target the keys in your JSON files that you want to extract.
### No JSON pointer example
The simplest way to use it is to specify no JSON pointer.
The loader will load all strings it finds in the JSON object.
Example JSON file:
```json
{
"texts": ["This is a sentence.", "This is another sentence."]
}
```
Example code:
```typescript
import { JSONLoader } from "langchain/document_loaders/fs/json";
const loader = new JSONLoader("src/document_loaders/example_data/example.json");
const docs = await loader.load();
/*
[
Document {
"metadata": {
"blobType": "application/json",
"line": 1,
"source": "blob",
},
"pageContent": "This is a sentence.",
},
Document {
"metadata": {
"blobType": "application/json",
"line": 2,
"source": "blob",
},
"pageContent": "This is another sentence.",
},
]
*/
```
### Using JSON pointer example
You can handle more advanced scenarios by choosing which keys in your JSON object you want to extract strings from.
In this example, we only want to extract information from the "from" and "surname" entries.
```json
{
"1": {
"body": "BD 2023 SUMMER",
"from": "LinkedIn Job",
"labels": ["IMPORTANT", "CATEGORY_UPDATES", "INBOX"]
},
"2": {
"body": "Intern, Treasury and other roles are available",
"from": "LinkedIn Job2",
"labels": ["IMPORTANT"],
"other": {
"name": "plop",
"surname": "bob"
}
}
}
```
Example code:
```typescript
import { JSONLoader } from "langchain/document_loaders/fs/json";
const loader = new JSONLoader(
"src/document_loaders/example_data/example.json",
["/from", "/surname"]
);
const docs = await loader.load();
/*
[
Document {
pageContent: 'LinkedIn Job',
metadata: { source: './src/json/example.json', line: 1 }
},
Document {
pageContent: 'LinkedIn Job2',
metadata: { source: './src/json/example.json', line: 2 }
},
Document {
pageContent: 'bob',
metadata: { source: './src/json/example.json', line: 3 }
}
]
*/
```
| |
145483
|
---
sidebar_position: 5
---
# How to track token usage
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [Chat models](/docs/concepts/#chat-models)
:::
This guide goes over how to track your token usage for specific calls.
## Using `AIMessage.usage_metadata`
A number of model providers return token usage information as part of the chat generation response. When available, this information will be included on the `AIMessage` objects produced by the corresponding model.
LangChain `AIMessage` objects include a [`usage_metadata`](https://api.js.langchain.com/classes/langchain_core.messages.AIMessage.html#usage_metadata) attribute for supported providers. When populated, this attribute will be an object with standard keys (e.g., "input_tokens" and "output_tokens").
#### OpenAI
import CodeBlock from "@theme/CodeBlock";
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
<IntegrationInstallTooltip></IntegrationInstallTooltip>
```bash npm2yarn
npm install @langchain/openai @langchain/core
```
import UsageMetadataExample from "@examples/models/chat/usage_metadata.ts";
<CodeBlock language="typescript">{UsageMetadataExample}</CodeBlock>
#### Anthropic
```bash npm2yarn
npm install @langchain/anthropic @langchain/core
```
import UsageMetadataExampleAnthropic from "@examples/models/chat/usage_metadata_anthropic.ts";
<CodeBlock language="typescript">{UsageMetadataExampleAnthropic}</CodeBlock>
## Using `AIMessage.response_metadata`
A number of model providers return token usage information as part of the chat generation response. When available, this is included in the `AIMessage.response_metadata` field.
#### OpenAI
import Example from "@examples/models/chat/token_usage_tracking.ts";
<CodeBlock language="typescript">{Example}</CodeBlock>
#### Anthropic
import AnthropicExample from "@examples/models/chat/token_usage_tracking_anthropic.ts";
<CodeBlock language="typescript">{AnthropicExample}</CodeBlock>
## Streaming
Some providers support token count metadata in a streaming context.
#### OpenAI
For example, OpenAI will return a message chunk at the end of a stream with token usage information. This behavior is supported by `@langchain/openai` >= 0.1.0 and can be enabled by passing a `stream_options` parameter when making your call.
:::info
By default, the last message chunk in a stream will include a `finish_reason` in the message's `response_metadata` attribute. If we include token usage in streaming mode, an additional chunk containing usage metadata will be added to the end of the stream, such that `finish_reason` appears on the second to last message chunk.
:::
import OpenAIStreamTokens from "@examples/models/chat/integration_openai_stream_tokens.ts";
<CodeBlock language="typescript">{OpenAIStreamTokens}</CodeBlock>
## Using callbacks
You can also use the `handleLLMEnd` callback to get the full output from the LLM, including token usage for supported models.
Here's an example of how you could do that:
import CallbackExample from "@examples/models/chat/token_usage_tracking_callback.ts";
<CodeBlock language="typescript">{CallbackExample}</CodeBlock>
## Next steps
You've now seen a few examples of how to track chat model token usage for supported providers.
Next, check out the other how-to guides on chat models in this section, like [how to get a model to return structured output](/docs/how_to/structured_output) or [how to add caching to your chat models](/docs/how_to/chat_model_caching).
| |
145487
|
# How to do retrieval with contextual compression
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [Retrievers](/docs/concepts/#retrievers)
- [Retrieval-augmented generation (RAG)](/docs/tutorials/rag)
:::
One challenge with retrieval is that usually you don't know the specific queries your document storage system will face when you ingest data into the system. This means that the information most relevant to a query may be buried in a document with a lot of irrelevant text. Passing that full document through your application can lead to more expensive LLM calls and poorer responses.
Contextual compression is meant to fix this. The idea is simple: instead of immediately returning retrieved documents as-is, you can compress them using the context of the given query, so that only the relevant information is returned. “Compressing” here refers to both compressing the contents of an individual document and filtering out documents wholesale.
To use the Contextual Compression Retriever, you'll need:
- a base retriever
- a Document Compressor
The Contextual Compression Retriever passes queries to the base retriever, takes the initial documents and passes them through the Document Compressor. The Document Compressor takes a list of documents and shortens it by reducing the contents of documents or dropping documents altogether.
## Using a vanilla vector store retriever
Let's start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks).
Given an example question, our retriever returns one or two relevant docs and a few irrelevant docs, and even the relevant docs have a lot of irrelevant information in them.
To extract all the context we can, we use an `LLMChainExtractor`, which will iterate over the initially returned documents and extract from each only the content that is relevant to the query.
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
<IntegrationInstallTooltip></IntegrationInstallTooltip>
```bash npm2yarn
npm install @langchain/openai @langchain/community @langchain/core
```
import CodeBlock from "@theme/CodeBlock";
import Example from "@examples/retrievers/contextual_compression.ts";
<CodeBlock language="typescript">{Example}</CodeBlock>
## `EmbeddingsFilter`
Making an extra LLM call over each retrieved document is expensive and slow. The `EmbeddingsFilter` provides a cheaper and faster option by embedding the documents and query and only returning those documents which have sufficiently similar embeddings to the query.
This is most useful for non-vector store retrievers where we may not have control over the returned chunk size, or as part of a pipeline, outlined below.
Here's an example:
import EmbeddingsFilterExample from "@examples/retrievers/embeddings_filter.ts";
<CodeBlock language="typescript">{EmbeddingsFilterExample}</CodeBlock>
## Stringing compressors and document transformers together
Using the `DocumentCompressorPipeline` we can also easily combine multiple compressors in sequence. Along with compressors we can add BaseDocumentTransformers to our pipeline, which don't perform any contextual compression but simply perform some transformation on a set of documents.
For example `TextSplitters` can be used as document transformers to split documents into smaller pieces, and the `EmbeddingsFilter` can be used to filter out documents based on similarity of the individual chunks to the input query.
Below we create a compressor pipeline by first splitting raw webpage documents retrieved from the [Tavily web search API retriever](/docs/integrations/retrievers/tavily) into smaller chunks, then filtering based on relevance to the query.
The result is smaller chunks that are semantically similar to the input query.
This skips the need to add documents to a vector store to perform similarity search, which can be useful for one-off use cases:
import DocumentCompressorPipelineExample from "@examples/retrievers/document_compressor_pipeline.ts";
<CodeBlock language="typescript">{DocumentCompressorPipelineExample}</CodeBlock>
## Next steps
You've now learned a few ways to use contextual compression to remove bad data from your results.
See the individual sections for deeper dives on specific retrievers, the [broader tutorial on RAG](/docs/tutorials/rag), or this section to learn how to
[create your own custom retriever over any data source](/docs/how_to/custom_retriever/).
| |
145492
|
# How to load data from a directory
This covers how to load all documents in a directory.
The second argument is a map of file extensions to loader factories. Each file will be passed to the matching loader, and the resulting documents will be concatenated together.
Example folder:
```text
src/document_loaders/example_data/example/
├── example.json
├── example.jsonl
├── example.txt
└── example.csv
```
Example code:
```typescript
import { DirectoryLoader } from "langchain/document_loaders/fs/directory";
import {
JSONLoader,
JSONLinesLoader,
} from "langchain/document_loaders/fs/json";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { CSVLoader } from "@langchain/community/document_loaders/fs/csv";
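// Map each file extension to a factory that constructs the appropriate loader for that file.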
const loader = new DirectoryLoader(
"src/document_loaders/example_data/example",
{
".json": (path) => new JSONLoader(path, "/texts"),
".jsonl": (path) => new JSONLinesLoader(path, "/html"),
".txt": (path) => new TextLoader(path),
".csv": (path) => new CSVLoader(path, "text"),
}
);
const docs = await loader.load();
console.log({ docs });
```
| |
145500
|
{
"cells": [
{
"cell_type": "raw",
"id": "d35de667-0352-4bfb-a890-cebe7f676fe7",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 5\n",
"keywords: [RunnablePassthrough, LCEL]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "b022ab74-794d-4c54-ad47-ff9549ddb9d2",
"metadata": {},
"source": [
"# How to pass through arguments from one step to the next\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
"- [Chaining runnables](/docs/how_to/sequence/)\n",
"- [Calling runnables in parallel](/docs/how_to/parallel/)\n",
"- [Custom functions](/docs/how_to/functions/)\n",
"\n",
":::\n",
"\n",
"\n",
"When composing chains with several steps, sometimes you will want to pass data from previous steps unchanged for use as input to a later step. The [`RunnablePassthrough`](https://api.js.langchain.com/classes/langchain_core.runnables.RunnablePassthrough.html) class allows you to do just this, and is typically is used in conjuction with a [RunnableParallel](/docs/how_to/parallel/) to pass data through to a later step in your constructed chains.\n",
"\n",
"Let's look at an example:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "03988b8d-d54c-4492-8707-1594372cf093",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{ passed: { num: \u001b[33m1\u001b[39m }, modified: \u001b[33m2\u001b[39m }"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import { RunnableParallel, RunnablePassthrough } from \"@langchain/core/runnables\";\n",
"\n",
"const runnable = RunnableParallel.from({\n",
" passed: new RunnablePassthrough<{ num: number }>(),\n",
" modified: (input: { num: number }) => input.num + 1,\n",
"});\n",
"\n",
"await runnable.invoke({ num: 1 });"
]
},
{
"cell_type": "markdown",
"id": "702c7acc-cd31-4037-9489-647df192fd7c",
"metadata": {},
"source": [
"As seen above, `passed` key was called with `RunnablePassthrough()` and so it simply passed on `{'num': 1}`. \n",
"\n",
"We also set a second key in the map with `modified`. This uses a lambda to set a single value adding 1 to the num, which resulted in `modified` key with the value of `2`."
]
},
{
"cell_type": "markdown",
"id": "15187a3b-d666-4b9b-a258-672fc51fe0e2",
"metadata": {},
"source": [
"## Retrieval Example\n",
"\n",
"In the example below, we see a more real-world use case where we use `RunnablePassthrough` along with `RunnableParallel` in a chain to properly format inputs to a prompt:\n",
"\n",
"```{=mdx}\n",
"import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n",
"import Npm2Yarn from \"@theme/Npm2Yarn\";\n",
"\n",
"<IntegrationInstallTooltip></IntegrationInstallTooltip>\n",
"\n",
"<Npm2Yarn>\n",
" @langchain/openai @langchain/core\n",
"</Npm2Yarn>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "267d1460-53c1-4fdb-b2c3-b6a1eb7fccff",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\u001b[32m\"Harrison worked at Kensho.\"\u001b[39m"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import { StringOutputParser } from \"@langchain/core/output_parsers\";\n",
"import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
"import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n",
"import { ChatOpenAI, OpenAIEmbeddings } from \"@langchain/openai\";\n",
"import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n",
"\n",
"const vectorstore = await MemoryVectorStore.fromDocuments([\n",
" { pageContent: \"harrison worked at kensho\", metadata: {} }\n",
"], new OpenAIEmbeddings());\n",
"\n",
"const retriever = vectorstore.asRetriever();\n",
"\n",
"const template = `Answer the question based only on the following context:\n",
"{context}\n",
"\n",
"Question: {question}\n",
"`;\n",
"\n",
"const prompt = ChatPromptTemplate.fromTemplate(template);\n",
"\n",
"const model = new ChatOpenAI({ model: \"gpt-4o\" });\n",
"\n",
"const retrievalChain = RunnableSequence.from([\n",
" {\n",
" context: retriever.pipe((docs) => docs[0].pageContent),\n",
" question: new RunnablePassthrough()\n",
" },\n",
" prompt,\n",
" model,\n",
" new StringOutputParser(),\n",
"]);\n",
"\n",
"await retrievalChain.invoke(\"where did harrison work?\");"
]
},
{
"cell_type": "markdown",
"id": "392cd4c4-e7ed-4ab8-934d-f7a4eca55ee1",
"metadata": {},
"source": [
"Here the input to prompt is expected to be a map with keys `\"context\"` and `\"question\"`. The user input is just the question. So we need to get the context using our retriever and passthrough the user input under the `\"question\"` key. The `RunnablePassthrough` allows us to pass on the user's question to the prompt and model.\n",
"\n",
"## Next steps\n",
"\n",
"Now you've learned how to pass data through your chains to help to help format the data flowing through your chains.\n",
"\n",
"To learn more, see the other how-to guides on runnables in this section."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Deno",
"language": "typescript",
"name": "deno"
},
"language_info": {
"file_extension": ".ts",
"mimetype": "text/x.typescript",
"name": "typescript",
"nb_converter": "script",
"pygments_lexer": "typescript",
"version": "5.3.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| |
145502
|
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to force tool calling behavior\n",
"\n",
"```{=mdx}\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
"\n",
":::\n",
"\n",
"```\n",
"\n",
"In order to force our LLM to select a specific tool, we can use the `tool_choice` parameter to ensure certain behavior. First, let's define our model and tools:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import { tool } from '@langchain/core/tools';\n",
"import { z } from 'zod';\n",
"\n",
"const add = tool((input) => {\n",
" return `${input.a + input.b}`\n",
"}, {\n",
" name: \"add\",\n",
" description: \"Adds a and b.\",\n",
" schema: z.object({\n",
" a: z.number(),\n",
" b: z.number(),\n",
" })\n",
"})\n",
"\n",
"const multiply = tool((input) => {\n",
" return `${input.a * input.b}`\n",
"}, {\n",
" name: \"Multiply\",\n",
" description: \"Multiplies a and b.\",\n",
" schema: z.object({\n",
" a: z.number(),\n",
" b: z.number(),\n",
" })\n",
"})\n",
"\n",
"const tools = [add, multiply]"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import { ChatOpenAI } from '@langchain/openai';\n",
"\n",
"const llm = new ChatOpenAI({\n",
" model: \"gpt-3.5-turbo\",\n",
"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example, we can force our tool to call the multiply tool by using the following code:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[\n",
" {\n",
" \"name\": \"Multiply\",\n",
" \"args\": {\n",
" \"a\": 2,\n",
" \"b\": 4\n",
" },\n",
" \"type\": \"tool_call\",\n",
" \"id\": \"call_d5isFbUkn17Wjr6yEtNz7dDF\"\n",
" }\n",
"]\n"
]
}
],
"source": [
"const llmForcedToMultiply = llm.bindTools(tools, {\n",
" tool_choice: \"Multiply\",\n",
"})\n",
"const multiplyResult = await llmForcedToMultiply.invoke(\"what is 2 + 4\");\n",
"console.log(JSON.stringify(multiplyResult.tool_calls, null, 2));"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Even if we pass it something that doesn't require multiplcation - it will still call the tool!"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can also just force our tool to select at least one of our tools by passing `\"any\"` (or for OpenAI models, the equivalent, `\"required\"`) to the `tool_choice` parameter."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[\n",
" {\n",
" \"name\": \"add\",\n",
" \"args\": {\n",
" \"a\": 2,\n",
" \"b\": 3\n",
" },\n",
" \"type\": \"tool_call\",\n",
" \"id\": \"call_La72g7Aj0XHG0pfPX6Dwg2vT\"\n",
" }\n",
"]\n"
]
}
],
"source": [
"const llmForcedToUseTool = llm.bindTools(tools, {\n",
" tool_choice: \"any\",\n",
"})\n",
"const anyToolResult = await llmForcedToUseTool.invoke(\"What day is today?\");\n",
"console.log(JSON.stringify(anyToolResult.tool_calls, null, 2));"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "TypeScript",
"language": "typescript",
"name": "tslab"
},
"language_info": {
"codemirror_mode": {
"mode": "typescript",
"name": "javascript",
"typescript": true
},
"file_extension": ".ts",
"mimetype": "text/typescript",
"name": "typescript",
"version": "3.7.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| |
145504
|
{
"cells": [
{
"cell_type": "raw",
"id": "94c3ad61",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 3\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "b91e03f1",
"metadata": {},
"source": [
"# How to use few shot examples\n",
"\n",
"In this guide, we'll learn how to create a simple prompt template that provides the model with example inputs and outputs when generating. Providing the LLM with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n",
"\n",
"A few-shot prompt template can be constructed from either a set of examples, or from an [Example Selector](https://api.js.langchain.com/classes/langchain_core.example_selectors.BaseExampleSelector.html) class responsible for choosing a subset of examples from the defined set.\n",
"\n",
"This guide will cover few-shotting with string prompt templates. For a guide on few-shotting with chat messages for chat models, see [here](/docs/how_to/few_shot_examples_chat/).\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Prompt templates](/docs/concepts/#prompt-templates)\n",
"- [Example selectors](/docs/concepts/#example-selectors)\n",
"- [LLMs](/docs/concepts/#llms)\n",
"- [Vectorstores](/docs/concepts/#vectorstores)\n",
"\n",
":::\n",
"\n",
"## Create a formatter for the few-shot examples\n",
"\n",
"Configure a formatter that will format the few-shot examples into a string. This formatter should be a `PromptTemplate` object."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "4e70bce2",
"metadata": {},
"outputs": [],
"source": [
"import { PromptTemplate } from \"@langchain/core/prompts\";\n",
"\n",
"const examplePrompt = PromptTemplate.fromTemplate(\"Question: {question}\\n{answer}\")"
]
},
{
"cell_type": "markdown",
"id": "50846ad4",
"metadata": {},
"source": [
"## Creating the example set\n",
"\n",
"Next, we'll create a list of few-shot examples. Each example should be a dictionary representing an example input to the formatter prompt we defined above."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "a44be840",
"metadata": {},
"outputs": [],
"source": [
"const examples = [\n",
" {\n",
" question: \"Who lived longer, Muhammad Ali or Alan Turing?\",\n",
" answer: `\n",
" Are follow up questions needed here: Yes.\n",
" Follow up: How old was Muhammad Ali when he died?\n",
" Intermediate answer: Muhammad Ali was 74 years old when he died.\n",
" Follow up: How old was Alan Turing when he died?\n",
" Intermediate answer: Alan Turing was 41 years old when he died.\n",
" So the final answer is: Muhammad Ali\n",
" `\n",
" },\n",
" {\n",
" question: \"When was the founder of craigslist born?\",\n",
" answer: `\n",
" Are follow up questions needed here: Yes.\n",
" Follow up: Who was the founder of craigslist?\n",
" Intermediate answer: Craigslist was founded by Craig Newmark.\n",
" Follow up: When was Craig Newmark born?\n",
" Intermediate answer: Craig Newmark was born on December 6, 1952.\n",
" So the final answer is: December 6, 1952\n",
" `\n",
" },\n",
" {\n",
" question: \"Who was the maternal grandfather of George Washington?\",\n",
" answer: `\n",
" Are follow up questions needed here: Yes.\n",
" Follow up: Who was the mother of George Washington?\n",
" Intermediate answer: The mother of George Washington was Mary Ball Washington.\n",
" Follow up: Who was the father of Mary Ball Washington?\n",
" Intermediate answer: The father of Mary Ball Washington was Joseph Ball.\n",
" So the final answer is: Joseph Ball\n",
" `\n",
" },\n",
" {\n",
" question: \"Are both the directors of Jaws and Casino Royale from the same country?\",\n",
" answer: `\n",
" Are follow up questions needed here: Yes.\n",
" Follow up: Who is the director of Jaws?\n",
" Intermediate Answer: The director of Jaws is Steven Spielberg.\n",
" Follow up: Where is Steven Spielberg from?\n",
" Intermediate Answer: The United States.\n",
" Follow up: Who is the director of Casino Royale?\n",
" Intermediate Answer: The director of Casino Royale is Martin Campbell.\n",
" Follow up: Where is Martin Campbell from?\n",
" Intermediate Answer: New Zealand.\n",
" So the final answer is: No\n",
" `\n",
" }\n",
" ];"
]
},
{
"cell_type": "markdown",
"id": "dad66af1",
"metadata": {},
"source": [
"### Pass the examples and formatter to `FewShotPromptTemplate`\n",
"\n",
"Finally, create a [`FewShotPromptTemplate`](https://api.js.langchain.com/classes/langchain_core.prompts.FewShotPromptTemplate.html) object. This object takes in the few-shot examples and the formatter for the few-shot examples. When this `FewShotPromptTemplate` is formatted, it formats the passed examples using the `examplePrompt`, then and adds them to the final prompt before `suffix`:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e76fa1ba",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"Question: Who lived longer, Muhammad Ali or Alan Turing?\n",
"\n",
" Are follow up questions needed here: Yes.\n",
" Follow up: How old was Muhammad Ali when he died?\n",
" Intermediate answer: Muhammad Ali was 74 years old when he died.\n",
" Follow up: How old was Alan Turing when he died?\n",
" Intermediate answer: Alan Turing was 41 years old when he died.\n",
" So the final answer is: Muhammad Ali\n",
" \n",
"\n",
"Question: When was the founder of craigslist born?\n",
"\n",
" Are follow up questions needed here: Yes.\n",
" Follow up: Who was the founder of craigslist?\n",
" Intermediate answer: Craigslist was founded by Craig Newmark.\n",
" Follow up: When was Craig Newmark born?\n",
" Intermediate answer: Craig Newmark was born on December 6, 1952.\n",
" So the final answer is: December 6, 1952\n",
" \n",
"\n",
"Question: Who was the maternal grandfather of George Washington?\n",
"\n",
" Are follow up questions needed here: Yes.\n",
" Follow up: Who was the mother of George Washington?\n",
" Intermediate answer: The mother of George Washington was Mary Ball Washington.\n",
" Follow up: Who was the father of Mary Ball Washington?\n",
" Intermediate answer: The father of Mary Ball Washington was Joseph Ball.\n",
" So the final answer is: Joseph Ball\n",
" \n",
"\n",
"Question: Are both the directors of Jaws and Casino Royale from the same country?\n",
"\n",
" Are follow up questions needed here: Yes.\n",
" Follow up: Who is the director of Jaws?\n",
" Intermediate Answer: The director of Jaws is Steven Spielberg.\n",
" Follow up: Where is Steven Spielberg from?\n",
" Intermediate Answer: The United States.\n",
" Follow up: Who is the director of Casino Royale?\n",
" Intermediate Answer: The director of Casino Royale is Martin Campbell.\n",
| |
145512
|
{
"cells": [
{
"cell_type": "raw",
"metadata": {},
"source": [
"---\n",
"keywords: [RunnablePassthrough, assign, LCEL]\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to add values to a chain's state\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
"- [Chaining runnables](/docs/how_to/sequence/)\n",
"- [Calling runnables in parallel](/docs/how_to/parallel/)\n",
"- [Custom functions](/docs/how_to/functions/)\n",
"- [Passing data through](/docs/how_to/passthrough)\n",
"\n",
":::\n",
"\n",
"An alternate way of [passing data through](/docs/how_to/passthrough) steps of a chain is to leave the current values of the chain state unchanged while assigning a new value under a given key. The [`RunnablePassthrough.assign()`](https://api.js.langchain.com/classes/langchain_core.runnables.RunnablePassthrough.html#assign-2) static method takes an input value and adds the extra arguments passed to the assign function.\n",
"\n",
"This is useful in the common [LangChain Expression Language](/docs/concepts/#langchain-expression-language) pattern of additively creating a dictionary to use as input to a later step.\n",
"\n",
"Here's an example:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{ extra: { num: \u001b[33m1\u001b[39m, mult: \u001b[33m3\u001b[39m, modified: \u001b[33m2\u001b[39m } }"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import { RunnableParallel, RunnablePassthrough } from \"@langchain/core/runnables\";\n",
"\n",
"const runnable = RunnableParallel.from({\n",
" extra: RunnablePassthrough.assign({\n",
" mult: (input: { num: number }) => input.num * 3,\n",
" modified: (input: { num: number }) => input.num + 1\n",
" })\n",
"});\n",
"\n",
"await runnable.invoke({ num: 1 });"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's break down what's happening here.\n",
"\n",
"- The input to the chain is `{\"num\": 1}`. This is passed into a `RunnableParallel`, which invokes the runnables it is passed in parallel with that input.\n",
"- The value under the `extra` key is invoked. `RunnablePassthrough.assign()` keeps the original keys in the input dict (`{\"num\": 1}`), and assigns a new key called `mult`. The value is `lambda x: x[\"num\"] * 3)`, which is `3`. Thus, the result is `{\"num\": 1, \"mult\": 3}`.\n",
"- `{\"num\": 1, \"mult\": 3}` is returned to the `RunnableParallel` call, and is set as the value to the key `extra`.\n",
"- At the same time, the `modified` key is called. The result is `2`, since the lambda extracts a key called `\"num\"` from its input and adds one.\n",
"\n",
"Thus, the result is `{'extra': {'num': 1, 'mult': 3}, 'modified': 2}`.\n",
"\n",
"## Streaming\n",
"\n",
"One convenient feature of this method is that it allows values to pass through as soon as they are available. To show this off, we'll use `RunnablePassthrough.assign()` to immediately return source docs in a retrieval chain:\n",
"\n",
"```{=mdx}\n",
"import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n",
"import Npm2Yarn from \"@theme/Npm2Yarn\";\n",
"\n",
"<IntegrationInstallTooltip></IntegrationInstallTooltip>\n",
"\n",
"<Npm2Yarn>\n",
" @langchain/openai @langchain/core\n",
"</Npm2Yarn>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{ question: \"where did harrison work?\" }\n",
"{ context: \"harrison worked at kensho\" }\n",
"{ output: \"\" }\n",
"{ output: \"H\" }\n",
"{ output: \"arrison\" }\n",
"{ output: \" worked\" }\n",
"{ output: \" at\" }\n",
"{ output: \" Kens\" }\n",
"{ output: \"ho\" }\n",
"{ output: \".\" }\n",
"{ output: \"\" }\n"
]
}
],
"source": [
"import { StringOutputParser } from \"@langchain/core/output_parsers\";\n",
"import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
"import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n",
"import { ChatOpenAI, OpenAIEmbeddings } from \"@langchain/openai\";\n",
"import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n",
"\n",
"const vectorstore = await MemoryVectorStore.fromDocuments([\n",
" { pageContent: \"harrison worked at kensho\", metadata: {} }\n",
"], new OpenAIEmbeddings());\n",
"\n",
"const retriever = vectorstore.asRetriever();\n",
"\n",
"const template = `Answer the question based only on the following context:\n",
"{context}\n",
"\n",
"Question: {question}\n",
"`;\n",
"\n",
"const prompt = ChatPromptTemplate.fromTemplate(template);\n",
"\n",
"const model = new ChatOpenAI({ model: \"gpt-4o\" });\n",
"\n",
"const generationChain = prompt.pipe(model).pipe(new StringOutputParser());\n",
"\n",
"const retrievalChain = RunnableSequence.from([\n",
" {\n",
" context: retriever.pipe((docs) => docs[0].pageContent),\n",
" question: new RunnablePassthrough()\n",
" },\n",
" RunnablePassthrough.assign({ output: generationChain }),\n",
"]);\n",
"\n",
"const stream = await retrievalChain.stream(\"where did harrison work?\");\n",
"\n",
"for await (const chunk of stream) {\n",
" console.log(chunk);\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can see that the first chunk contains the original `\"question\"` since that is immediately available. The second chunk contains `\"context\"` since the retriever finishes second. Finally, the output from the `generation_chain` streams in chunks as soon as it is available.\n",
"\n",
"## Next steps\n",
"\n",
"Now you've learned how to pass data through your chains to help to help format the data flowing through your chains.\n",
"\n",
"To learn more, see the other how-to guides on runnables in this section."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Deno",
"language": "typescript",
"name": "deno"
},
"language_info": {
"file_extension": ".ts",
"mimetype": "text/x.typescript",
"name": "typescript",
"nb_converter": "script",
"pygments_lexer": "typescript",
"version": "5.3.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| |
145513
|
# How to use a vector store to retrieve data
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [Vector stores](/docs/concepts/#vectorstores)
- [Retrievers](/docs/concepts/#retrievers)
- [Text splitters](/docs/concepts#text-splitters)
- [Chaining runnables](/docs/how_to/sequence/)
:::
Vector stores can be converted into retrievers using the [`.asRetriever()`](https://api.js.langchain.com/classes/langchain_core.vectorstores.VectorStore.html#asRetriever) method, which allows you to more easily compose them in chains.
Below, we show a retrieval-augmented generation (RAG) chain that performs question answering over documents using the following steps:
1. Initialize a vector store
2. Create a retriever from that vector store
3. Compose a question answering chain
4. Ask questions!
Each of the steps has multiple sub-steps and potential configurations, but we'll go through one common flow.
First, install the required dependency:
import CodeBlock from "@theme/CodeBlock";
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
<IntegrationInstallTooltip></IntegrationInstallTooltip>
```bash npm2yarn
npm install @langchain/openai @langchain/core
```
You can download the `state_of_the_union.txt` file [here](https://github.com/langchain-ai/langchain/blob/master/docs/docs/modules/state_of_the_union.txt).
import RetrievalQAExample from "@examples/chains/retrieval_qa.ts";
<CodeBlock language="typescript">{RetrievalQAExample}</CodeBlock>
Let's walk through what's happening here; a condensed code sketch follows the list.
1. We first load a long text and split it into smaller documents using a text splitter.
We then load those documents (which also embeds the documents using the passed `OpenAIEmbeddings` instance) into HNSWLib, our vector store, creating our index.
2. Though we can query the vector store directly, we convert the vector store into a retriever to return retrieved documents in the right format for the question answering chain.
3. We initialize a retrieval chain, which we'll call later in step 4.
4. We ask questions!
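For reference, here is a rough sketch of that flow (assuming the `HNSWLib` vector store, which also requires the `@langchain/community` and `hnswlib-node` packages, and an illustrative OpenAI model name; the imported example above may differ in its details):
```typescript
import * as fs from "node:fs";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
import { HNSWLib } from "@langchain/community/vectorstores/hnswlib";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";

// 1. Load a long text and split it into smaller documents.
const text = fs.readFileSync("state_of_the_union.txt", "utf8");
const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await splitter.createDocuments([text]);

// Loading the documents into the vector store embeds them and creates the index.
const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());

// 2. Convert the vector store into a retriever.
const retriever = vectorStore.asRetriever();

// 3. Compose a question answering chain.
const prompt = ChatPromptTemplate.fromTemplate(
  `Answer the question based only on the following context:
{context}

Question: {input}`
);
const combineDocsChain = await createStuffDocumentsChain({
  llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
  prompt,
});
const retrievalChain = await createRetrievalChain({ retriever, combineDocsChain });

// 4. Ask questions!
const response = await retrievalChain.invoke({
  input: "What did the president say about Justice Breyer?",
});
console.log(response.answer);
```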
## Next steps
You've now learned how to convert a vector store into a retriever.
See the individual sections for deeper dives on specific retrievers, the [broader tutorial on RAG](/docs/tutorials/rag), or this guide on how to
[create your own custom retriever over any data source](/docs/how_to/custom_retriever/).
| |
145522
|
"Message chunks are additive by design – one can simply add them up using the `.concat()` method to get the state of the response so far!"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"AIMessageChunk {\n",
" lc_serializable: true,\n",
" lc_kwargs: {\n",
" content: \"Hello! I'm a\",\n",
" additional_kwargs: {},\n",
" response_metadata: { prompt: 0, completion: 0, finish_reason: null },\n",
" tool_call_chunks: [],\n",
" id: 'chatcmpl-9lO8YUEcX7rqaxxevelHBtl1GaWoo',\n",
" tool_calls: [],\n",
" invalid_tool_calls: []\n",
" },\n",
" lc_namespace: [ 'langchain_core', 'messages' ],\n",
" content: \"Hello! I'm a\",\n",
" name: undefined,\n",
" additional_kwargs: {},\n",
" response_metadata: { prompt: 0, completion: 0, finish_reason: null },\n",
" id: 'chatcmpl-9lO8YUEcX7rqaxxevelHBtl1GaWoo',\n",
" tool_calls: [],\n",
" invalid_tool_calls: [],\n",
" tool_call_chunks: [],\n",
" usage_metadata: undefined\n",
"}\n"
]
}
],
"source": [
"let finalChunk = chunks[0];\n",
"\n",
"for (const chunk of chunks.slice(1, 5)) {\n",
" finalChunk = finalChunk.concat(chunk);\n",
"}\n",
"\n",
"finalChunk"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Chains\n",
"\n",
"Virtually all LLM applications involve more steps than just a call to a language model.\n",
"\n",
"Let’s build a simple chain using `LangChain Expression Language` (`LCEL`) that combines a prompt, model and a parser and verify that streaming works.\n",
"\n",
"We will use `StringOutputParser` to parse the output from the model. This is a simple parser that extracts the content field from an `AIMessageChunk`, giving us the `token` returned by the model.\n",
"\n",
":::{.callout-tip}\n",
"LCEL is a declarative way to specify a “program” by chainining together different LangChain primitives. Chains created using LCEL benefit from an automatic implementation of stream, allowing streaming of the final output. In fact, chains created with LCEL implement the entire standard Runnable interface.\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"|\n",
"Sure|\n",
",|\n",
" here's|\n",
" a|\n",
" joke|\n",
" for|\n",
" you|\n",
":\n",
"\n",
"|\n",
"Why|\n",
" did|\n",
" the|\n",
" par|\n",
"rot|\n",
" sit|\n",
" on|\n",
" the|\n",
" stick|\n",
"?\n",
"\n",
"|\n",
"Because|\n",
" it|\n",
" wanted|\n",
" to|\n",
" be|\n",
" a|\n",
" \"|\n",
"pol|\n",
"ly|\n",
"-stick|\n",
"-al|\n",
"\"|\n",
" observer|\n",
"!|\n",
"|\n",
"|\n"
]
}
],
"source": [
"import { StringOutputParser } from \"@langchain/core/output_parsers\";\n",
"import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
"\n",
"const prompt = ChatPromptTemplate.fromTemplate(\"Tell me a joke about {topic}\");\n",
"\n",
"const parser = new StringOutputParser();\n",
"\n",
"const chain = prompt.pipe(model).pipe(parser);\n",
"\n",
"const stream = await chain.stream({\n",
" topic: \"parrot\",\n",
"});\n",
"\n",
"for await (const chunk of stream) {\n",
" console.log(`${chunk}|`)\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
":::{.callout-note}\n",
"You do not have to use the `LangChain Expression Language` to use LangChain and can instead rely on a standard **imperative** programming approach by\n",
"caling `invoke`, `batch` or `stream` on each component individually, assigning the results to variables and then using them downstream as you see fit.\n",
"\n",
"If that works for your needs, then that's fine by us 👌!\n",
":::\n",
"\n",
"### Working with Input Streams\n",
"\n",
"What if you wanted to stream JSON from the output as it was being generated?\n",
"\n",
"If you were to rely on `JSON.parse` to parse the partial json, the parsing would fail as the partial json wouldn't be valid json.\n",
"\n",
"You'd likely be at a complete loss of what to do and claim that it wasn't possible to stream JSON.\n",
"\n",
"Well, turns out there is a way to do it - the parser needs to operate on the **input stream**, and attempt to \"auto-complete\" the partial json into a valid state.\n",
"\n",
"Let's see such a parser in action to understand what this means."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" countries: [\n",
" { name: 'France', population: 67390000 },\n",
" { name: 'Spain', population: 47350000 },\n",
" { name: 'Japan', population: 125800000 }\n",
" ]\n",
"}\n"
]
}
],
"source": [
"import { JsonOutputParser } from \"@langchain/core/output_parsers\"\n",
"\n",
"const chain = model.pipe(new JsonOutputParser());\n",
"const stream = await chain.stream(\n",
" `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`\n",
");\n",
"\n",
"for await (const chunk of stream) {\n",
" console.log(chunk);\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now, let's **break** streaming. We'll use the previous example and append an extraction function at the end that extracts the country names from the finalized JSON. Since this new last step is just a function call with no defined streaming behavior, the streaming output from previous steps is aggregated, then passed as a single input to the function.\n",
"\n",
":::{.callout-warning}\n",
"Any steps in the chain that operate on **finalized inputs** rather than on **input streams** can break streaming functionality via `stream`.\n",
":::\n",
"\n",
":::{.callout-tip}\n",
"Later, we will discuss the `streamEvents` API which streams results from intermediate steps. This API will stream results from intermediate steps even if the chain contains steps that only operate on **finalized inputs**.\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[\"France\",\"Spain\",\"Japan\"]\n"
]
}
],
"source": [
| |
145537
|
{
"cells": [
{
"cell_type": "raw",
"id": "0e77c293-4049-43be-ba49-ff9daeefeee7",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 4\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "14d3fd06",
"metadata": {},
"source": [
"# How to do per-user retrieval\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following:\n",
"\n",
"- [Retrieval-augmented generation](/docs/tutorials/rag/)\n",
"\n",
":::\n",
"\n",
"When building a retrieval app, you often have to build it with multiple users in\n",
"mind. This means that you may be storing data not just for one user, but for\n",
"many different users, and they should not be able to see each other's data. This\n",
"means that you need to be able to configure your retrieval chain to only\n",
"retrieve certain information. This generally involves two steps.\n",
"\n",
"**Step 1: Make sure the retriever you are using supports multiple users**\n",
"\n",
"At the moment, there is no unified flag or filter for this in LangChain. Rather,\n",
"each vectorstore and retriever may have their own, and may be called different\n",
"things (namespaces, multi-tenancy, etc). For vectorstores, this is generally\n",
"exposed as a keyword argument that is passed in during `similaritySearch`. By\n",
"reading the documentation or source code, figure out whether the retriever you\n",
"are using supports multiple users, and, if so, how to use it.\n",
"\n",
"**Step 2: Add that parameter as a configurable field for the chain**\n",
"\n",
"The LangChain `config` object is passed through to every Runnable. Here you can\n",
"add any fields you'd like to the `configurable` object. Later, inside the chain\n",
"we can extract these fields.\n",
"\n",
"**Step 3: Call the chain with that configurable field**\n",
"\n",
"Now, at runtime you can call this chain with configurable field.\n",
"\n",
"## Code Example\n",
"\n",
"Let's see a concrete example of what this looks like in code. We will use\n",
"Pinecone for this example."
]
},
{
"cell_type": "markdown",
"id": "c8ccbef7",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"### Install dependencies\n",
"\n",
"```{=mdx}\n",
"import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n",
"import Npm2Yarn from \"@theme/Npm2Yarn\";\n",
"\n",
"<IntegrationInstallTooltip></IntegrationInstallTooltip>\n",
"\n",
"<Npm2Yarn>\n",
" @langchain/pinecone @langchain/openai @langchain/core @pinecone-database/pinecone\n",
"</Npm2Yarn>\n",
"```\n",
"\n",
"### Set environment variables\n",
"\n",
"We'll use OpenAI and Pinecone in this example:\n",
"\n",
"```env\n",
"OPENAI_API_KEY=your-api-key\n",
"\n",
"PINECONE_API_KEY=your-api-key\n",
"PINECONE_INDEX=your-index-name\n",
"\n",
"# Optional, use LangSmith for best-in-class observability\n",
"LANGSMITH_API_KEY=your-api-key\n",
"LANGCHAIN_TRACING_V2=true\n",
"\n",
"# Reduce tracing latency if you are not in a serverless environment\n",
"# LANGCHAIN_CALLBACKS_BACKGROUND=true\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "7345de3c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[ \u001b[32m\"77b8f174-9d89-4c6c-b2ab-607fe3913b2d\"\u001b[39m ]"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import { OpenAIEmbeddings } from \"@langchain/openai\";\n",
"import { PineconeStore } from \"@langchain/pinecone\";\n",
"import { Pinecone } from \"@pinecone-database/pinecone\";\n",
"import { Document } from \"@langchain/core/documents\";\n",
"\n",
"const embeddings = new OpenAIEmbeddings();\n",
"\n",
"const pinecone = new Pinecone();\n",
"\n",
"const pineconeIndex = pinecone.Index(process.env.PINECONE_INDEX);\n",
"\n",
"/**\n",
" * Pinecone allows you to partition the records in an index into namespaces. \n",
" * Queries and other operations are then limited to one namespace, \n",
" * so different requests can search different subsets of your index.\n",
" * Read more about namespaces here: https://docs.pinecone.io/guides/indexes/use-namespaces\n",
" * \n",
" * NOTE: If you have namespace enabled in your Pinecone index, you must provide the namespace when creating the PineconeStore.\n",
" */\n",
"const namespace = \"pinecone\";\n",
"\n",
"const vectorStore = await PineconeStore.fromExistingIndex(\n",
" new OpenAIEmbeddings(),\n",
" { pineconeIndex, namespace },\n",
");\n",
"\n",
"await vectorStore.addDocuments(\n",
" [new Document({ pageContent: \"i worked at kensho\" })],\n",
" { namespace: \"harrison\" },\n",
");\n",
"\n",
"await vectorStore.addDocuments(\n",
" [new Document({ pageContent: \"i worked at facebook\" })],\n",
" { namespace: \"ankush\" },\n",
");"
]
},
{
"cell_type": "markdown",
"id": "39c11920",
"metadata": {},
"source": [
"The pinecone kwarg for `namespace` can be used to separate documents"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "3c2a39fa",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[ Document { pageContent: \u001b[32m\"i worked at facebook\"\u001b[39m, metadata: {} } ]"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"// This will only get documents for Ankush\n",
"const ankushRetriever = vectorStore.asRetriever({\n",
" filter: {\n",
" namespace: \"ankush\",\n",
" },\n",
"});\n",
"\n",
"await ankushRetriever.invoke(\n",
" \"where did i work?\",\n",
");"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "56393baa",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[ Document { pageContent: \u001b[32m\"i worked at kensho\"\u001b[39m, metadata: {} } ]"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"// This will only get documents for Harrison\n",
"const harrisonRetriever = vectorStore.asRetriever({\n",
" filter: {\n",
" namespace: \"harrison\",\n",
" },\n",
"});\n",
"\n",
"await harrisonRetriever.invoke(\n",
" \"where did i work?\",\n",
");"
]
},
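  {
   "cell_type": "markdown",
   "id": "configurable-namespace-sketch",
   "metadata": {},
   "source": [
    "To cover steps 2 and 3 described above, one possible approach (a rough sketch; names like `configurableRetrievalChain` are illustrative, and the rest of this guide may structure things differently) is to read a per-user `namespace` off of the `configurable` config at runtime and build the retriever from it:\n",
    "\n",
    "```typescript\n",
    "import { RunnableLambda, type RunnableConfig } from \"@langchain/core/runnables\";\n",
    "\n",
    "// Step 2: read the per-user namespace from the `configurable` object.\n",
    "const configurableRetrievalChain = RunnableLambda.from(\n",
    "  async (question: string, config?: RunnableConfig) => {\n",
    "    const namespace = config?.configurable?.namespace ?? \"harrison\";\n",
    "    const retriever = vectorStore.asRetriever({ filter: { namespace } });\n",
    "    return retriever.invoke(question);\n",
    "  }\n",
    ");\n",
    "\n",
    "// Step 3: call the chain with that configurable field.\n",
    "await configurableRetrievalChain.invoke(\"where did i work?\", {\n",
    "  configurable: { namespace: \"ankush\" },\n",
    "});\n",
    "```"
   ]
  },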
{
"cell_type": "markdown",
| |
145539
|
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to add chat history\n",
"\n",
"\n",
":::note\n",
"\n",
"This tutorial previously built a chatbot using [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html). You can access this version of the tutorial in the [v0.2 docs](https://js.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/).\n",
"\n",
"The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n",
"\n",
":::\n",
"\n",
"In many Q&A applications we want to allow the user to have a back-and-forth conversation, meaning the application needs some sort of \"memory\" of past questions and answers, and some logic for incorporating those into its current thinking.\n",
"\n",
"In this guide we focus on **adding logic for incorporating historical messages.**\n",
"\n",
"This is largely a condensed version of the [Conversational RAG tutorial](/docs/tutorials/qa_chat_history).\n",
"\n",
"We will cover two approaches:\n",
"\n",
"1. [Chains](/docs/how_to/qa_chat_history_how_to#chains), in which we always execute a retrieval step;\n",
"2. [Agents](/docs/how_to/qa_chat_history_how_to#agents), in which we give an LLM discretion over whether and how to execute a retrieval step (or multiple steps).\n",
"\n",
"For the external knowledge source, we will use the same [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng from the [RAG tutorial](/docs/tutorials/rag)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"### Dependencies\n",
"\n",
"We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/#chat-models) or [LLM](/docs/concepts#llms), [Embeddings](/docs/concepts#embedding-models), and [VectorStore](/docs/concepts#vectorstores) or [Retriever](/docs/concepts#retrievers).\n",
"\n",
"We’ll use the following packages:\n",
"\n",
"```bash\n",
"npm install --save langchain @langchain/openai langchain cheerio uuid\n",
"```\n",
"\n",
"We need to set environment variable `OPENAI_API_KEY`:\n",
"\n",
"```bash\n",
"export OPENAI_API_KEY=YOUR_KEY\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### LangSmith\n",
"\n",
"Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://docs.smith.langchain.com).\n",
"\n",
"Note that LangSmith is not needed, but it is helpful. If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\n",
"\n",
"\n",
"```bash\n",
"export LANGCHAIN_TRACING_V2=true\n",
"export LANGCHAIN_API_KEY=YOUR_KEY\n",
"\n",
"# Reduce tracing latency if you are not in a serverless environment\n",
"# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Chains {#chains}\n",
"\n",
"In a conversational RAG application, queries issued to the retriever should be informed by the context of the conversation. LangChain provides a [createHistoryAwareRetriever](https://api.js.langchain.com/functions/langchain.chains_history_aware_retriever.createHistoryAwareRetriever.html) constructor to simplify this. It constructs a chain that accepts keys `input` and `chat_history` as input, and has the same output schema as a retriever. `createHistoryAwareRetriever` requires as inputs: \n",
"\n",
"1. LLM;\n",
"2. Retriever;\n",
"3. Prompt.\n",
"\n",
"First we obtain these objects:\n",
"\n",
"### LLM\n",
"\n",
"We can use any supported chat model:\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\"\n",
"\n",
"<ChatModelTabs customVarName=\"llm\" />\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"// @lc-docs-hide-cell\n",
"import { ChatOpenAI } from \"@langchain/openai\";\n",
"\n",
"const llm = new ChatOpenAI({ model: \"gpt-4o\" });"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Initial setup"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n",
"import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n",
"import { MemoryVectorStore } from \"langchain/vectorstores/memory\"\n",
"import { OpenAIEmbeddings } from \"@langchain/openai\";\n",
"\n",
"const loader = new CheerioWebBaseLoader(\n",
" \"https://lilianweng.github.io/posts/2023-06-23-agent/\"\n",
");\n",
"\n",
"const docs = await loader.load();\n",
"\n",
"const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n",
"const splits = await textSplitter.splitDocuments(docs);\n",
"const vectorStore = await MemoryVectorStore.fromDocuments(splits, new OpenAIEmbeddings());\n",
"\n",
"// Retrieve and generate using the relevant snippets of the blog.\n",
"const retriever = vectorStore.asRetriever();"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prompt\n",
"\n",
"We'll use a prompt that includes a `MessagesPlaceholder` variable under the name \"chat_history\". This allows us to pass in a list of Messages to the prompt using the \"chat_history\" input key, and these messages will be inserted after the system message and before the human message containing the latest question."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n",
"\n",
"const contextualizeQSystemPrompt = (\n",
" \"Given a chat history and the latest user question \" +\n",
" \"which might reference context in the chat history, \" +\n",
" \"formulate a standalone question which can be understood \" +\n",
" \"without the chat history. Do NOT answer the question, \" +\n",
" \"just reformulate it if needed and otherwise return it as is.\"\n",
")\n",
"\n",
"const contextualizeQPrompt = ChatPromptTemplate.fromMessages(\n",
" [\n",
" [\"system\", contextualizeQSystemPrompt],\n",
" new MessagesPlaceholder(\"chat_history\"),\n",
" [\"human\", \"{input}\"],\n",
" ]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Assembling the chain\n",
"\n",
"We can then instantiate the history-aware retriever:"
]
},
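  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "// A minimal sketch of the instantiation, assuming the `llm`, `retriever`,\n",
    "// and `contextualizeQPrompt` objects defined above:\n",
    "import { createHistoryAwareRetriever } from \"langchain/chains/history_aware_retriever\";\n",
    "\n",
    "const historyAwareRetriever = await createHistoryAwareRetriever({\n",
    "  llm,\n",
    "  retriever,\n",
    "  rephrasePrompt: contextualizeQPrompt,\n",
    "});"
   ]
  },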
| |
145573
|
# How to load PDF files
> [Portable Document Format (PDF)](https://en.wikipedia.org/wiki/PDF), standardized as ISO 32000, is a file format developed by Adobe in 1992 to present documents, including text formatting and images, in a manner independent of application software, hardware, and operating systems.
This covers how to load `PDF` documents into the Document format that we use downstream.
By default, one document will be created for each page in the PDF file. You can change this behavior by setting the `splitPages` option to `false`.
## Setup
```bash npm2yarn
npm install pdf-parse
```
## Usage, one document per page
```typescript
import { PDFLoader } from "@langchain/community/document_loaders/fs/pdf";
// Or, in web environments:
// import { WebPDFLoader } from "@langchain/community/document_loaders/web/pdf";
// const blob = new Blob(); // e.g. from a file input
// const loader = new WebPDFLoader(blob);
const loader = new PDFLoader("src/document_loaders/example_data/example.pdf");
const docs = await loader.load();
```
## Usage, one document per file
```typescript
import { PDFLoader } from "@langchain/community/document_loaders/fs/pdf";
const loader = new PDFLoader("src/document_loaders/example_data/example.pdf", {
splitPages: false,
});
const docs = await loader.load();
```
## Usage, custom `pdfjs` build
By default we use the `pdfjs` build bundled with `pdf-parse`, which is compatible with most environments, including Node.js and modern browsers. If you want to use a more recent version of `pdfjs-dist`, or a custom build, you can do so by providing a custom `pdfjs` function that returns a promise resolving to the `PDFJS` object.
In the following example we use the "legacy" (see [pdfjs docs](https://github.com/mozilla/pdf.js/wiki/Frequently-Asked-Questions#which-browsersenvironments-are-supported)) build of `pdfjs-dist`, which includes several polyfills not included in the default build.
```bash npm2yarn
npm install pdfjs-dist
```
```typescript
import { PDFLoader } from "@langchain/community/document_loaders/fs/pdf";
const loader = new PDFLoader("src/document_loaders/example_data/example.pdf", {
// you may need to add `.then(m => m.default)` to the end of the import
pdfjs: () => import("pdfjs-dist/legacy/build/pdf.js"),
});
```
## Eliminating extra spaces
PDFs come in many varieties, which makes reading them a challenge. The loader parses individual text elements and joins them together with a space by default, but
if you are seeing excessive spaces, this may not be the desired behavior. In that case, you can override the separator with an empty string like this:
```typescript
import { PDFLoader } from "@langchain/community/document_loaders/fs/pdf";
const loader = new PDFLoader("src/document_loaders/example_data/example.pdf", {
parsedItemSeparator: "",
});
const docs = await loader.load();
```
| |
145578
|
---
sidebar_position: 1
---
# How to stream responses from an LLM
All [`LLM`s](https://api.js.langchain.com/classes/langchain_core.language_models_llms.BaseLLM.html) implement the [Runnable interface](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html), which comes with **default** implementations of standard runnable methods (i.e. `invoke`, `batch`, `stream`, and `streamEvents`).
The **default** streaming implementations provide an `AsyncGenerator` that yields a single value: the final output from the underlying model provider.
The ability to stream the output token-by-token depends on whether the provider has implemented proper streaming support.
See which [integrations support token-by-token streaming here](/docs/integrations/llms/).
:::{.callout-note}
The **default** implementation does **not** provide support for token-by-token streaming, but it ensures that the model can be swapped in for any other model as it supports the same standard interface.
:::
## Using `.stream()`
import CodeBlock from "@theme/CodeBlock";
The easiest way to stream is to use the `.stream()` method. This returns a readable stream that you can also iterate over:
import StreamMethodExample from "@examples/models/llm/llm_streaming_stream_method.ts";
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
<IntegrationInstallTooltip></IntegrationInstallTooltip>
```bash npm2yarn
npm install @langchain/openai @langchain/core
```
<CodeBlock language="typescript">{StreamMethodExample}</CodeBlock>
For models that do not support streaming, the entire response will be returned as a single chunk.
## Using a callback handler
You can also use a [`CallbackHandler`](https://api.js.langchain.com/classes/langchain_core.callbacks_base.BaseCallbackHandler.html) like so:
import StreamingExample from "@examples/models/llm/llm_streaming.ts";
<CodeBlock language="typescript">{StreamingExample}</CodeBlock>
We still have access to the final `LLMResult` when using `generate`. However, `tokenUsage` may not currently be supported for all model providers when streaming.
| |
145586
|
{
"cells": [
{
"cell_type": "raw",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"keywords: [recursivecharactertextsplitter]\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to recursively split text by characters\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Text splitters](/docs/concepts#text-splitters)\n",
"\n",
":::\n",
"\n",
"This text splitter is the recommended one for generic text. It is parameterized by a list of characters. It tries to split on them in order until the chunks are small enough. The default list is `[\"\\n\\n\", \"\\n\", \" \", \"\"]`. This has the effect of trying to keep all paragraphs (and then sentences, and then words) together as long as possible, as those would generically seem to be the strongest semantically related pieces of text.\n",
"\n",
"1. How the text is split: by list of characters.\n",
"2. How the chunk size is measured: by number of characters.\n",
"\n",
"Below we show example usage.\n",
"\n",
"To obtain the string content directly, use `.splitText`.\n",
"\n",
"To create LangChain [Document](https://api.js.langchain.com/classes/langchain_core.documents.Document.html) objects (e.g., for use in downstream tasks), use `.createDocuments`."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[\n",
" Document {\n",
" pageContent: \"Hi.\",\n",
" metadata: { loc: { lines: { from: 1, to: 1 } } }\n",
" },\n",
" Document {\n",
" pageContent: \"I'm\",\n",
" metadata: { loc: { lines: { from: 3, to: 3 } } }\n",
" },\n",
" Document {\n",
" pageContent: \"Harrison.\",\n",
" metadata: { loc: { lines: { from: 3, to: 3 } } }\n",
" }\n",
"]\n"
]
}
],
"source": [
"import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n",
"\n",
"const text = `Hi.\\n\\nI'm Harrison.\\n\\nHow? Are? You?\\nOkay then f f f f.\n",
"This is a weird text to write, but gotta test the splittingggg some how.\\n\\n\n",
"Bye!\\n\\n-H.`;\n",
"const splitter = new RecursiveCharacterTextSplitter({\n",
" chunkSize: 10,\n",
" chunkOverlap: 1,\n",
"});\n",
"\n",
"const output = await splitter.createDocuments([text]);\n",
"\n",
"console.log(output.slice(0, 3));"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You'll note that in the above example we are splitting a raw text string and getting back a list of documents. We can also split documents directly."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[\n",
" Document {\n",
" pageContent: \"Hi.\",\n",
" metadata: { loc: { lines: { from: 1, to: 1 } } }\n",
" },\n",
" Document {\n",
" pageContent: \"I'm\",\n",
" metadata: { loc: { lines: { from: 3, to: 3 } } }\n",
" },\n",
" Document {\n",
" pageContent: \"Harrison.\",\n",
" metadata: { loc: { lines: { from: 3, to: 3 } } }\n",
" }\n",
"]\n"
]
}
],
"source": [
"import { Document } from \"@langchain/core/documents\";\n",
"import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n",
"\n",
"const text = `Hi.\\n\\nI'm Harrison.\\n\\nHow? Are? You?\\nOkay then f f f f.\n",
"This is a weird text to write, but gotta test the splittingggg some how.\\n\\n\n",
"Bye!\\n\\n-H.`;\n",
"const splitter = new RecursiveCharacterTextSplitter({\n",
" chunkSize: 10,\n",
" chunkOverlap: 1,\n",
"});\n",
"\n",
"const docOutput = await splitter.splitDocuments([\n",
" new Document({ pageContent: text }),\n",
"]);\n",
"\n",
"console.log(docOutput.slice(0, 3));"
]
},
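  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As noted above, you can also call `.splitText` to get back the raw string chunks instead of `Document` objects. A minimal sketch, reusing the `splitter` and `text` from the previous cell:\n",
    "\n",
    "```typescript\n",
    "const chunks = await splitter.splitText(text);\n",
    "\n",
    "// e.g. [ \"Hi.\", \"I'm\", \"Harrison.\" ]\n",
    "console.log(chunks.slice(0, 3));\n",
    "```"
   ]
  },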
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can customize the `RecursiveCharacterTextSplitter` with arbitrary separators by passing a `separators` parameter like this:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[\n",
" Document {\n",
" pageContent: \"Some other considerations include:\",\n",
" metadata: { loc: { lines: { from: 1, to: 1 } } }\n",
" },\n",
" Document {\n",
" pageContent: \"- Do you deploy your backend and frontend together\",\n",
" metadata: { loc: { lines: { from: 3, to: 3 } } }\n",
" },\n",
" Document {\n",
" pageContent: \"r, or separately?\",\n",
" metadata: { loc: { lines: { from: 3, to: 3 } } }\n",
" }\n",
"]\n"
]
}
],
"source": [
"import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n",
"import { Document } from \"@langchain/core/documents\";\n",
"\n",
"const text = `Some other considerations include:\n",
"\n",
"- Do you deploy your backend and frontend together, or separately?\n",
"- Do you deploy your backend co-located with your database, or separately?\n",
"\n",
"**Production Support:** As you move your LangChains into production, we'd love to offer more hands-on support.\n",
"Fill out [this form](https://airtable.com/appwQzlErAS2qiP0L/shrGtGaVBVAz7NcV2) to share more about what you're building, and our team will get in touch.\n",
"\n",
"## Deployment Options\n",
"\n",
"See below for a list of deployment options for your LangChain app. If you don't see your preferred option, please get in touch and we can add it to this list.`;\n",
"\n",
"const splitter = new RecursiveCharacterTextSplitter({\n",
" chunkSize: 50,\n",
" chunkOverlap: 1,\n",
" separators: [\"|\", \"##\", \">\", \"-\"],\n",
"});\n",
"\n",
"const docOutput = await splitter.splitDocuments([\n",
" new Document({ pageContent: text }),\n",
"]);\n",
"\n",
"console.log(docOutput.slice(0, 3));"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You've now learned a method for splitting text by character.\n",
"\n",
|