| id | text | title |
|---|---|---|
154592
|
"""Agent toolkits contain integrations with various resources and services.
LangChain has a large ecosystem of integrations with various external resources
like local and remote file systems, APIs and databases.
These integrations allow developers to create versatile applications that combine the
power of LLMs with the ability to access, interact with and manipulate external
resources.
When developing an application, developers should inspect the capabilities and
permissions of the tools that underlie the given agent toolkit, and determine
whether permissions of the given toolkit are appropriate for the application.
See [Security](https://python.langchain.com/docs/security) for more information.
"""
from pathlib import Path
from typing import TYPE_CHECKING, Any
from langchain_core._api.path import as_import_path
from langchain_core.tools.retriever import create_retriever_tool
from langchain._api import create_importer
from langchain.agents.agent_toolkits.conversational_retrieval.openai_functions import (
create_conversational_retrieval_agent,
)
from langchain.agents.agent_toolkits.vectorstore.base import (
create_vectorstore_agent,
create_vectorstore_router_agent,
)
from langchain.agents.agent_toolkits.vectorstore.toolkit import (
VectorStoreInfo,
VectorStoreRouterToolkit,
VectorStoreToolkit,
)
if TYPE_CHECKING:
from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit
from langchain_community.agent_toolkits.azure_cognitive_services import (
AzureCognitiveServicesToolkit,
)
from langchain_community.agent_toolkits.file_management.toolkit import (
FileManagementToolkit,
)
from langchain_community.agent_toolkits.gmail.toolkit import GmailToolkit
from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit
from langchain_community.agent_toolkits.json.base import create_json_agent
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
from langchain_community.agent_toolkits.multion.toolkit import MultionToolkit
from langchain_community.agent_toolkits.nasa.toolkit import NasaToolkit
from langchain_community.agent_toolkits.nla.toolkit import NLAToolkit
from langchain_community.agent_toolkits.office365.toolkit import O365Toolkit
from langchain_community.agent_toolkits.openapi.base import create_openapi_agent
from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain_community.agent_toolkits.playwright.toolkit import (
PlayWrightBrowserToolkit,
)
from langchain_community.agent_toolkits.powerbi.base import create_pbi_agent
from langchain_community.agent_toolkits.powerbi.chat_base import (
create_pbi_chat_agent,
)
from langchain_community.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain_community.agent_toolkits.slack.toolkit import SlackToolkit
from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent
from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
from langchain_community.agent_toolkits.sql.base import create_sql_agent
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_community.agent_toolkits.steam.toolkit import SteamToolkit
from langchain_community.agent_toolkits.zapier.toolkit import ZapierToolkit
DEPRECATED_AGENTS = [
"create_csv_agent",
"create_pandas_dataframe_agent",
"create_xorbits_agent",
"create_python_agent",
"create_spark_dataframe_agent",
]
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AINetworkToolkit": "langchain_community.agent_toolkits.ainetwork.toolkit",
"AmadeusToolkit": "langchain_community.agent_toolkits.amadeus.toolkit",
"AzureCognitiveServicesToolkit": (
"langchain_community.agent_toolkits.azure_cognitive_services"
),
"FileManagementToolkit": (
"langchain_community.agent_toolkits.file_management.toolkit"
),
"GmailToolkit": "langchain_community.agent_toolkits.gmail.toolkit",
"JiraToolkit": "langchain_community.agent_toolkits.jira.toolkit",
"JsonToolkit": "langchain_community.agent_toolkits.json.toolkit",
"MultionToolkit": "langchain_community.agent_toolkits.multion.toolkit",
"NasaToolkit": "langchain_community.agent_toolkits.nasa.toolkit",
"NLAToolkit": "langchain_community.agent_toolkits.nla.toolkit",
"O365Toolkit": "langchain_community.agent_toolkits.office365.toolkit",
"OpenAPIToolkit": "langchain_community.agent_toolkits.openapi.toolkit",
"PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit",
"PowerBIToolkit": "langchain_community.agent_toolkits.powerbi.toolkit",
"SlackToolkit": "langchain_community.agent_toolkits.slack.toolkit",
"SteamToolkit": "langchain_community.agent_toolkits.steam.toolkit",
"SQLDatabaseToolkit": "langchain_community.agent_toolkits.sql.toolkit",
"SparkSQLToolkit": "langchain_community.agent_toolkits.spark_sql.toolkit",
"ZapierToolkit": "langchain_community.agent_toolkits.zapier.toolkit",
"create_json_agent": "langchain_community.agent_toolkits.json.base",
"create_openapi_agent": "langchain_community.agent_toolkits.openapi.base",
"create_pbi_agent": "langchain_community.agent_toolkits.powerbi.base",
"create_pbi_chat_agent": "langchain_community.agent_toolkits.powerbi.chat_base",
"create_spark_sql_agent": "langchain_community.agent_toolkits.spark_sql.base",
"create_sql_agent": "langchain_community.agent_toolkits.sql.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
if name in DEPRECATED_AGENTS:
relative_path = as_import_path(Path(__file__).parent, suffix=name)
old_path = "langchain." + relative_path
new_path = "langchain_experimental." + relative_path
raise ImportError(
f"{name} has been moved to langchain experimental. "
"See https://github.com/langchain-ai/langchain/discussions/11680 "
"for more information.\n"
f"Please update your import statement from: `{old_path}` to `{new_path}`."
)
return _import_attribute(name)
__all__ = [
"AINetworkToolkit",
"AmadeusToolkit",
"AzureCognitiveServicesToolkit",
"FileManagementToolkit",
"GmailToolkit",
"JiraToolkit",
"JsonToolkit",
"MultionToolkit",
"NasaToolkit",
"NLAToolkit",
"O365Toolkit",
"OpenAPIToolkit",
"PlayWrightBrowserToolkit",
"PowerBIToolkit",
"SlackToolkit",
"SteamToolkit",
"SQLDatabaseToolkit",
"SparkSQLToolkit",
"VectorStoreInfo",
"VectorStoreRouterToolkit",
"VectorStoreToolkit",
"ZapierToolkit",
"create_json_agent",
"create_openapi_agent",
"create_pbi_agent",
"create_pbi_chat_agent",
"create_spark_sql_agent",
"create_sql_agent",
"create_vectorstore_agent",
"create_vectorstore_router_agent",
"create_conversational_retrieval_agent",
"create_retriever_tool",
]
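The module docstring above stresses checking the capabilities and permissions of the tools behind a toolkit before wiring it into an agent. As a rough illustration, here is a hedged sketch that scopes the file-management tools to a temporary directory and exposes only read-only tools. The constructor arguments (`root_dir`, `selected_tools`) and the tool names are assumptions based on `langchain_community`'s `FileManagementToolkit`; verify them against your installed version.

```python
# Sketch: sandboxing FileManagementToolkit so the agent's tools cannot touch
# files outside a temporary working directory.
from tempfile import TemporaryDirectory

from langchain_community.agent_toolkits.file_management.toolkit import (
    FileManagementToolkit,
)

working_directory = TemporaryDirectory()

toolkit = FileManagementToolkit(
    root_dir=working_directory.name,                  # all file operations stay here
    selected_tools=["read_file", "list_directory"],   # expose read-only capabilities
)

for tool in toolkit.get_tools():
    print(tool.name, "-", tool.description)
```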
| |
154609
|
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
if name == "create_python_agent":
# Get directory of langchain package
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
raise ImportError(
"This agent has been moved to langchain experimental. "
"This agent relies on the Python REPL tool under the hood, so to use it "
"safely please sandbox the Python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680. "
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)
raise AttributeError(f"{name} does not exist")
| |
154643
|
from typing import Any, List, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.memory import BaseMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_core.tools import BaseTool
from langchain.agents.agent import AgentExecutor
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
AgentTokenBufferMemory,
)
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.memory.token_buffer import ConversationTokenBufferMemory
def _get_default_system_message() -> SystemMessage:
return SystemMessage(
content=(
"Do your best to answer the questions. "
"Feel free to use any tools available to look up "
"relevant information, but only if necessary."
)
)
def create_conversational_retrieval_agent(
llm: BaseLanguageModel,
tools: List[BaseTool],
remember_intermediate_steps: bool = True,
memory_key: str = "chat_history",
system_message: Optional[SystemMessage] = None,
verbose: bool = False,
max_token_limit: int = 2000,
**kwargs: Any,
) -> AgentExecutor:
"""A convenience method for creating a conversational retrieval agent.
Args:
llm: The language model to use, should be ChatOpenAI
tools: A list of tools the agent has access to
remember_intermediate_steps: Whether the agent should remember intermediate
steps or not. Intermediate steps refer to prior action/observation
pairs from previous questions. The benefit of remembering these is if
there is relevant information in there, the agent can use it to answer
follow up questions. The downside is it will take up more tokens.
memory_key: The name of the memory key in the prompt.
system_message: The system message to use. By default, a basic one will
be used.
verbose: Whether the final AgentExecutor should be verbose.
Defaults to False.
max_token_limit: The max number of tokens to keep around in memory.
Defaults to 2000.
Returns:
An agent executor initialized appropriately
"""
if remember_intermediate_steps:
memory: BaseMemory = AgentTokenBufferMemory(
memory_key=memory_key, llm=llm, max_token_limit=max_token_limit
)
else:
memory = ConversationTokenBufferMemory(
memory_key=memory_key,
return_messages=True,
output_key="output",
llm=llm,
max_token_limit=max_token_limit,
)
_system_message = system_message or _get_default_system_message()
prompt = OpenAIFunctionsAgent.create_prompt(
system_message=_system_message,
extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
return AgentExecutor(
agent=agent,
tools=tools,
memory=memory,
verbose=verbose,
return_intermediate_steps=remember_intermediate_steps,
**kwargs,
)
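A hedged usage sketch for `create_conversational_retrieval_agent` follows. It assumes the optional `langchain-openai` package and an OpenAI API key; the retriever, tool name, and question are placeholders, and any `BaseRetriever` plus any function-calling chat model can be substituted.

```python
# Sketch: wrapping a retriever as a tool and handing it to the convenience
# constructor defined above. Model choice and tool metadata are illustrative.
from langchain_core.tools.retriever import create_retriever_tool
from langchain_openai import ChatOpenAI  # assumed provider package

llm = ChatOpenAI(temperature=0)
retriever = ...  # any BaseRetriever, e.g. vector_store.as_retriever()

tool = create_retriever_tool(
    retriever,
    name="search_docs",
    description="Searches the internal documentation and returns relevant passages.",
)

agent_executor = create_conversational_retrieval_agent(llm, [tool], verbose=True)
result = agent_executor.invoke({"input": "What does the onboarding guide say about VPN access?"})
print(result["output"])
```

Because `remember_intermediate_steps` defaults to True, follow-up questions in the same session can reuse earlier retrievals at the cost of extra tokens.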
| |
154644
|
from langchain.tools.retriever import create_retriever_tool
__all__ = ["create_retriever_tool"]
| |
154674
|
from __future__ import annotations
from typing import List, Optional, Sequence, Union
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.tools.render import ToolsRenderer, render_text_description
from langchain.agents import AgentOutputParser
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import ReActSingleInputOutputParser
def create_react_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: BasePromptTemplate,
output_parser: Optional[AgentOutputParser] = None,
tools_renderer: ToolsRenderer = render_text_description,
*,
stop_sequence: Union[bool, List[str]] = True,
) -> Runnable:
"""Create an agent that uses ReAct prompting.
Based on the paper "ReAct: Synergizing Reasoning and Acting in Language Models"
(https://arxiv.org/abs/2210.03629)
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more.
output_parser: AgentOutputParser for parsing the LLM output.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description`.
stop_sequence: bool or list of str.
If True, adds a stop token of "Observation:" to avoid hallucinated observations.
If False, does not add a stop token.
If a list of str, uses the provided list as the stop tokens.
Default is True. You may want to set this to False if the LLM you are using
does not support stop sequences.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Examples:
.. code-block:: python
from langchain import hub
from langchain_community.llms import OpenAI
from langchain.agents import AgentExecutor, create_react_agent
prompt = hub.pull("hwchase17/react")
model = OpenAI()
tools = ...
agent = create_react_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Use with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
# Notice that chat_history is a string
# since this prompt is aimed at LLMs, not chat models
"chat_history": "Human: My name is Bob\\nAI: Hello Bob!",
}
)
Prompt:
The prompt must have input keys:
* `tools`: contains descriptions and arguments for each tool.
* `tool_names`: contains all tool names.
* `agent_scratchpad`: contains previous agent actions and tool outputs as a string.
Here's an example:
.. code-block:: python
from langchain_core.prompts import PromptTemplate
template = '''Answer the following questions as best you can. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin!
Question: {input}
Thought:{agent_scratchpad}'''
prompt = PromptTemplate.from_template(template)
""" # noqa: E501
missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
prompt = prompt.partial(
tools=tools_renderer(list(tools)),
tool_names=", ".join([t.name for t in tools]),
)
if stop_sequence:
stop = ["\nObservation"] if stop_sequence is True else stop_sequence
llm_with_stop = llm.bind(stop=stop)
else:
llm_with_stop = llm
output_parser = output_parser or ReActSingleInputOutputParser()
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_log_to_str(x["intermediate_steps"]),
)
| prompt
| llm_with_stop
| output_parser
)
return agent
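The `stop_sequence` handling above is the part most likely to need tuning per provider, so here is a short, hedged sketch of the two non-default options. It mirrors the imports in the docstring example; the model and tools are placeholders, not recommendations.

```python
# Sketch: controlling the stop sequence when the backing LLM has quirks around
# stop tokens.
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.llms import OpenAI

prompt = hub.pull("hwchase17/react")
model = OpenAI()
tools = ...  # your BaseTool instances

# Option 1: provide the stop tokens explicitly.
agent = create_react_agent(
    model, tools, prompt, stop_sequence=["\nObservation", "Observation:"]
)

# Option 2: the model rejects stop sequences entirely, so disable them and rely
# on the prompt alone to keep the model from inventing observations.
agent_no_stop = create_react_agent(model, tools, prompt, stop_sequence=False)

executor = AgentExecutor(agent=agent, tools=tools)
```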
| |
154702
|
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, and streaming operations.
Support for **async** allows servers hosting LCEL-based programs
to scale better for higher concurrent loads.
**Batch** operations allow for processing multiple inputs in parallel.
**Streaming** of intermediate outputs, as they're being generated, allows for
creating more responsive UX.
This module contains non-core Runnable classes.
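For example, a minimal sketch using only `langchain_core` primitives (no model calls) shows the shared `invoke` / `batch` / `stream` surface every Runnable exposes:

```python
# Two RunnableLambdas composed with | form a RunnableSequence that supports
# the full Runnable interface out of the box.
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)

print(chain.invoke(1))         # 4
print(chain.batch([1, 2, 3]))  # [4, 6, 8]
for chunk in chain.stream(3):
    print(chunk)               # 8 (a single chunk, since these steps do not stream)
```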
"""
| |
154788
|
"""**Vector store** stores embedded data and performs vector search.
One of the most common ways to store and search over unstructured data is to
embed it and store the resulting embedding vectors, and then query the store
and retrieve the data that are 'most similar' to the embedded query.
**Class hierarchy:**
.. code-block::
VectorStore --> <name> # Examples: Annoy, FAISS, Milvus
BaseRetriever --> VectorStoreRetriever --> <name>Retriever # Example: VespaRetriever
**Main helpers:**
.. code-block::
Embeddings, Document
""" # noqa: E501
from typing import TYPE_CHECKING, Any
from langchain_core.vectorstores import VectorStore
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import (
FAISS,
AlibabaCloudOpenSearch,
AlibabaCloudOpenSearchSettings,
AnalyticDB,
Annoy,
AstraDB,
AtlasDB,
AwaDB,
AzureCosmosDBVectorSearch,
AzureSearch,
Bagel,
Cassandra,
Chroma,
Clarifai,
Clickhouse,
ClickhouseSettings,
DashVector,
DatabricksVectorSearch,
DeepLake,
Dingo,
DocArrayHnswSearch,
DocArrayInMemorySearch,
DuckDB,
EcloudESVectorStore,
ElasticKnnSearch,
ElasticsearchStore,
ElasticVectorSearch,
Epsilla,
Hologres,
LanceDB,
LLMRails,
Marqo,
MatchingEngine,
Meilisearch,
Milvus,
MomentoVectorIndex,
MongoDBAtlasVectorSearch,
MyScale,
MyScaleSettings,
Neo4jVector,
NeuralDBClientVectorStore,
NeuralDBVectorStore,
OpenSearchVectorSearch,
PGEmbedding,
PGVector,
Pinecone,
Qdrant,
Redis,
Rockset,
ScaNN,
SemaDB,
SingleStoreDB,
SKLearnVectorStore,
SQLiteVSS,
StarRocks,
SupabaseVectorStore,
Tair,
TencentVectorDB,
Tigris,
TileDB,
TimescaleVector,
Typesense,
USearch,
Vald,
Vearch,
Vectara,
VespaStore,
Weaviate,
Yellowbrick,
ZepVectorStore,
Zilliz,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AlibabaCloudOpenSearch": "langchain_community.vectorstores",
"AlibabaCloudOpenSearchSettings": "langchain_community.vectorstores",
"AnalyticDB": "langchain_community.vectorstores",
"Annoy": "langchain_community.vectorstores",
"AstraDB": "langchain_community.vectorstores",
"AtlasDB": "langchain_community.vectorstores",
"AwaDB": "langchain_community.vectorstores",
"AzureCosmosDBVectorSearch": "langchain_community.vectorstores",
"AzureSearch": "langchain_community.vectorstores",
"Bagel": "langchain_community.vectorstores",
"Cassandra": "langchain_community.vectorstores",
"Chroma": "langchain_community.vectorstores",
"Clarifai": "langchain_community.vectorstores",
"Clickhouse": "langchain_community.vectorstores",
"ClickhouseSettings": "langchain_community.vectorstores",
"DashVector": "langchain_community.vectorstores",
"DatabricksVectorSearch": "langchain_community.vectorstores",
"DeepLake": "langchain_community.vectorstores",
"Dingo": "langchain_community.vectorstores",
"DocArrayHnswSearch": "langchain_community.vectorstores",
"DocArrayInMemorySearch": "langchain_community.vectorstores",
"DuckDB": "langchain_community.vectorstores",
"EcloudESVectorStore": "langchain_community.vectorstores",
"ElasticKnnSearch": "langchain_community.vectorstores",
"ElasticsearchStore": "langchain_community.vectorstores",
"ElasticVectorSearch": "langchain_community.vectorstores",
"Epsilla": "langchain_community.vectorstores",
"FAISS": "langchain_community.vectorstores",
"Hologres": "langchain_community.vectorstores",
"LanceDB": "langchain_community.vectorstores",
"LLMRails": "langchain_community.vectorstores",
"Marqo": "langchain_community.vectorstores",
"MatchingEngine": "langchain_community.vectorstores",
"Meilisearch": "langchain_community.vectorstores",
"Milvus": "langchain_community.vectorstores",
"MomentoVectorIndex": "langchain_community.vectorstores",
"MongoDBAtlasVectorSearch": "langchain_community.vectorstores",
"MyScale": "langchain_community.vectorstores",
"MyScaleSettings": "langchain_community.vectorstores",
"Neo4jVector": "langchain_community.vectorstores",
"NeuralDBClientVectorStore": "langchain_community.vectorstores",
"NeuralDBVectorStore": "langchain_community.vectorstores",
"NEuralDBVectorStore": "langchain_community.vectorstores",
"OpenSearchVectorSearch": "langchain_community.vectorstores",
"PGEmbedding": "langchain_community.vectorstores",
"PGVector": "langchain_community.vectorstores",
"Pinecone": "langchain_community.vectorstores",
"Qdrant": "langchain_community.vectorstores",
"Redis": "langchain_community.vectorstores",
"Rockset": "langchain_community.vectorstores",
"ScaNN": "langchain_community.vectorstores",
"SemaDB": "langchain_community.vectorstores",
"SingleStoreDB": "langchain_community.vectorstores",
"SKLearnVectorStore": "langchain_community.vectorstores",
"SQLiteVSS": "langchain_community.vectorstores",
"StarRocks": "langchain_community.vectorstores",
"SupabaseVectorStore": "langchain_community.vectorstores",
"Tair": "langchain_community.vectorstores",
"TencentVectorDB": "langchain_community.vectorstores",
"Tigris": "langchain_community.vectorstores",
"TileDB": "langchain_community.vectorstores",
"TimescaleVector": "langchain_community.vectorstores",
"Typesense": "langchain_community.vectorstores",
"USearch": "langchain_community.vectorstores",
"Vald": "langchain_community.vectorstores",
"Vearch": "langchain_community.vectorstores",
"Vectara": "langchain_community.vectorstores",
"VespaStore": "langchain_community.vectorstores",
"Weaviate": "langchain_community.vectorstores",
"Yellowbrick": "langchain_community.vectorstores",
"ZepVectorStore": "langchain_community.vectorstores",
"Zilliz": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AlibabaCloudOpenSearch",
"AlibabaCloudOpenSearchSettings",
"AnalyticDB",
"Annoy",
"AstraDB",
"AtlasDB",
"AwaDB",
"AzureCosmosDBVectorSearch",
"AzureSearch",
"Bagel",
"Cassandra",
"Chroma",
"Clarifai",
"Clickhouse",
"ClickhouseSettings",
"DashVector",
"DatabricksVectorSearch",
"DeepLake",
"Dingo",
"DocArrayHnswSearch",
"DocArrayInMemorySearch",
"DuckDB",
"EcloudESVectorStore",
"ElasticKnnSearch",
"ElasticsearchStore",
"ElasticVectorSearch",
"Epsilla",
"FAISS",
"Hologres",
"LanceDB",
"LLMRails",
"Marqo",
"MatchingEngine",
"Meilisearch",
"Milvus",
"MomentoVectorIndex",
"MongoDBAtlasVectorSearch",
"MyScale",
"MyScaleSettings",
"Neo4jVector",
"NeuralDBClientVectorStore",
"NeuralDBVectorStore",
"OpenSearchVectorSearch",
"PGEmbedding",
"PGVector",
"Pinecone",
"Qdrant",
"Redis",
"Rockset",
"ScaNN",
"SemaDB",
"SingleStoreDB",
"SKLearnVectorStore",
"SQLiteVSS",
"StarRocks",
"SupabaseVectorStore",
"Tair",
"TencentVectorDB",
"Tigris",
"TileDB",
"TimescaleVector",
"Typesense",
"USearch",
"Vald",
"Vearch",
"Vectara",
"VectorStore",
"VespaStore",
"Weaviate",
"Yellowbrick",
"ZepVectorStore",
"Zilliz",
]
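The class hierarchy in the docstring (VectorStore wrapped by a VectorStoreRetriever) is easiest to see with a concrete store. The sketch below assumes `faiss-cpu`, `langchain-community`, and `langchain-openai` are installed and an OpenAI API key is configured; any other VectorStore/Embeddings pair follows the same pattern.

```python
# Sketch: build a small FAISS index, query it directly, then wrap it as a retriever.
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings  # assumed embeddings provider

store = FAISS.from_texts(
    ["LangChain integrates many vector stores.", "FAISS is an in-process index."],
    embedding=OpenAIEmbeddings(),
)

# Direct similarity search against the store...
docs = store.similarity_search("Which index runs in-process?", k=1)

# ...or wrap it as a BaseRetriever for use in chains and agents.
retriever = store.as_retriever(search_kwargs={"k": 1})
docs = retriever.invoke("Which index runs in-process?")
print(docs[0].page_content)
```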
| |
154795
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import FAISS
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"FAISS": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FAISS",
]
| |
154897
|
from langchain_core.output_parsers import PydanticOutputParser
__all__ = ["PydanticOutputParser"]
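Since this module only re-exports `PydanticOutputParser` from `langchain_core`, a brief sketch of its typical use may help; the `Joke` model and the hard-coded response string are illustrative only.

```python
# Sketch: declare the expected output shape as a pydantic model, inject the
# format instructions into a prompt, then parse the model's JSON reply.
from langchain_core.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field

class Joke(BaseModel):
    setup: str = Field(description="the question that sets up the joke")
    punchline: str = Field(description="the answer that delivers the joke")

parser = PydanticOutputParser(pydantic_object=Joke)

# Usually embedded in the prompt so the model knows the expected JSON shape.
print(parser.get_format_instructions())

# Parse a (here hard-coded) model response into a validated Joke instance.
joke = parser.parse(
    '{"setup": "Why did the chicken cross the road?", '
    '"punchline": "To get to the other side."}'
)
print(joke.punchline)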
| |
154904
|
from __future__ import annotations
from typing import Any, Dict, List
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.output_parsers.json import parse_and_check_json_markdown
from pydantic import BaseModel
from langchain.output_parsers.format_instructions import (
STRUCTURED_FORMAT_INSTRUCTIONS,
STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS,
)
line_template = '\t"{name}": {type} // {description}'
class ResponseSchema(BaseModel):
"""Schema for a response from a structured output parser."""
name: str
"""The name of the schema."""
description: str
"""The description of the schema."""
type: str = "string"
"""The type of the response."""
def _get_sub_string(schema: ResponseSchema) -> str:
return line_template.format(
name=schema.name, description=schema.description, type=schema.type
)
class StructuredOutputParser(BaseOutputParser[Dict[str, Any]]):
"""Parse the output of an LLM call to a structured output."""
response_schemas: List[ResponseSchema]
"""The schemas for the response."""
@classmethod
def from_response_schemas(
cls, response_schemas: List[ResponseSchema]
) -> StructuredOutputParser:
return cls(response_schemas=response_schemas)
def get_format_instructions(self, only_json: bool = False) -> str:
"""Get format instructions for the output parser.
example:
```python
from langchain.output_parsers.structured import (
StructuredOutputParser, ResponseSchema
)
response_schemas = [
ResponseSchema(
name="foo",
description="a list of strings",
type="List[string]"
),
ResponseSchema(
name="bar",
description="a string",
type="string"
),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
print(parser.get_format_instructions()) # noqa: T201
output:
# The output should be a Markdown code snippet formatted in the following
# schema, including the leading and trailing "```json" and "```":
#
# ```json
# {
# "foo": List[string] // a list of strings
# "bar": string // a string
# }
# ```
Args:
only_json (bool): If True, only the json in the Markdown code snippet
will be returned, without the introductory text. Defaults to False.
"""
schema_str = "\n".join(
[_get_sub_string(schema) for schema in self.response_schemas]
)
if only_json:
return STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS.format(format=schema_str)
else:
return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str)
def parse(self, text: str) -> Dict[str, Any]:
expected_keys = [rs.name for rs in self.response_schemas]
return parse_and_check_json_markdown(text, expected_keys)
@property
def _type(self) -> str:
return "structured"
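A short, self-contained sketch of the parser defined above: the response string is hand-written to stand in for an LLM reply, and the fence characters are assembled at runtime only to keep this snippet readable.

```python
# Sketch: round-tripping StructuredOutputParser on a fake model response.
from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser

schemas = [
    ResponseSchema(name="answer", description="the answer to the question"),
    ResponseSchema(name="source", description="the source used to answer"),
]
parser = StructuredOutputParser.from_response_schemas(schemas)

# These instructions would normally be embedded in the prompt.
print(parser.get_format_instructions())

fence = "```"
fake_llm_output = (
    f"{fence}json\n"
    '{"answer": "Paris", "source": "https://en.wikipedia.org/wiki/Paris"}\n'
    f"{fence}"
)
print(parser.parse(fake_llm_output))  # {'answer': 'Paris', 'source': '...'}
```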
| |
155037
|
"TencentCOSFileLoader": "langchain_community.document_loaders",
"TextLoader": "langchain_community.document_loaders",
"ToMarkdownLoader": "langchain_community.document_loaders",
"TomlLoader": "langchain_community.document_loaders",
"TrelloLoader": "langchain_community.document_loaders",
"TwitterTweetLoader": "langchain_community.document_loaders",
"UnstructuredAPIFileIOLoader": "langchain_community.document_loaders",
"UnstructuredAPIFileLoader": "langchain_community.document_loaders",
"UnstructuredCSVLoader": "langchain_community.document_loaders",
"UnstructuredEPubLoader": "langchain_community.document_loaders",
"UnstructuredEmailLoader": "langchain_community.document_loaders",
"UnstructuredExcelLoader": "langchain_community.document_loaders",
"UnstructuredFileIOLoader": "langchain_community.document_loaders",
"UnstructuredFileLoader": "langchain_community.document_loaders",
"UnstructuredHTMLLoader": "langchain_community.document_loaders",
"UnstructuredImageLoader": "langchain_community.document_loaders",
"UnstructuredMarkdownLoader": "langchain_community.document_loaders",
"UnstructuredODTLoader": "langchain_community.document_loaders",
"UnstructuredOrgModeLoader": "langchain_community.document_loaders",
"UnstructuredPDFLoader": "langchain_community.document_loaders",
"UnstructuredPowerPointLoader": "langchain_community.document_loaders",
"UnstructuredRSTLoader": "langchain_community.document_loaders",
"UnstructuredRTFLoader": "langchain_community.document_loaders",
"UnstructuredTSVLoader": "langchain_community.document_loaders",
"UnstructuredURLLoader": "langchain_community.document_loaders",
"UnstructuredWordDocumentLoader": "langchain_community.document_loaders",
"UnstructuredXMLLoader": "langchain_community.document_loaders",
"WeatherDataLoader": "langchain_community.document_loaders",
"WebBaseLoader": "langchain_community.document_loaders",
"WhatsAppChatLoader": "langchain_community.document_loaders",
"WikipediaLoader": "langchain_community.document_loaders",
"XorbitsLoader": "langchain_community.document_loaders",
"YoutubeAudioLoader": "langchain_community.document_loaders",
"YoutubeLoader": "langchain_community.document_loaders",
"YuqueLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AcreomLoader",
"AsyncChromiumLoader",
"AZLyricsLoader",
"AirbyteCDKLoader",
"AirbyteGongLoader",
"AirbyteJSONLoader",
"AirbyteHubspotLoader",
"AirbyteSalesforceLoader",
"AirbyteShopifyLoader",
"AirbyteStripeLoader",
"AirbyteTypeformLoader",
"AirbyteZendeskSupportLoader",
"AirtableLoader",
"AmazonTextractPDFLoader",
"ApifyDatasetLoader",
"ArcGISLoader",
"ArxivLoader",
"AssemblyAIAudioTranscriptLoader",
"AsyncHtmlLoader",
"AzureAIDataLoader",
"AzureBlobStorageContainerLoader",
"AzureBlobStorageFileLoader",
"BSHTMLLoader",
"BibtexLoader",
"BigQueryLoader",
"BiliBiliLoader",
"BlackboardLoader",
"Blob",
"BlobLoader",
"BlockchainDocumentLoader",
"BraveSearchLoader",
"BrowserlessLoader",
"CSVLoader",
"ChatGPTLoader",
"CoNLLULoader",
"CollegeConfidentialLoader",
"ConcurrentLoader",
"ConfluenceLoader",
"CouchbaseLoader",
"CubeSemanticLoader",
"DataFrameLoader",
"DatadogLogsLoader",
"DiffbotLoader",
"DirectoryLoader",
"DiscordChatLoader",
"DocugamiLoader",
"DocusaurusLoader",
"Docx2txtLoader",
"DropboxLoader",
"DuckDBLoader",
"EtherscanLoader",
"EverNoteLoader",
"FacebookChatLoader",
"FaunaLoader",
"FigmaFileLoader",
"FileSystemBlobLoader",
"GCSDirectoryLoader",
"GCSFileLoader",
"GeoDataFrameLoader",
"GithubFileLoader",
"GitHubIssuesLoader",
"GitLoader",
"GitbookLoader",
"GoogleApiClient",
"GoogleApiYoutubeLoader",
"GoogleSpeechToTextLoader",
"GoogleDriveLoader",
"GutenbergLoader",
"HNLoader",
"HuggingFaceDatasetLoader",
"IFixitLoader",
"IMSDbLoader",
"ImageCaptionLoader",
"IuguLoader",
"JSONLoader",
"JoplinLoader",
"LarkSuiteDocLoader",
"LakeFSLoader",
"MHTMLLoader",
"MWDumpLoader",
"MastodonTootsLoader",
"MathpixPDFLoader",
"MaxComputeLoader",
"MergedDataLoader",
"ModernTreasuryLoader",
"MongodbLoader",
"NewsURLLoader",
"NotebookLoader",
"NotionDBLoader",
"NotionDirectoryLoader",
"OBSDirectoryLoader",
"OBSFileLoader",
"ObsidianLoader",
"OneDriveFileLoader",
"OneDriveLoader",
"OnlinePDFLoader",
"OpenCityDataLoader",
"OutlookMessageLoader",
"PDFMinerLoader",
"PDFMinerPDFasHTMLLoader",
"PDFPlumberLoader",
"PlaywrightURLLoader",
"PolarsDataFrameLoader",
"PsychicLoader",
"PubMedLoader",
"PyMuPDFLoader",
"PyPDFDirectoryLoader",
"PagedPDFSplitter",
"PyPDFLoader",
"PyPDFium2Loader",
"PySparkDataFrameLoader",
"PythonLoader",
"RSSFeedLoader",
"ReadTheDocsLoader",
"RecursiveUrlLoader",
"RedditPostsLoader",
"RoamLoader",
"RocksetLoader",
"S3DirectoryLoader",
"S3FileLoader",
"SRTLoader",
"SeleniumURLLoader",
"SharePointLoader",
"SitemapLoader",
"SlackDirectoryLoader",
"SnowflakeLoader",
"SpreedlyLoader",
"StripeLoader",
"TelegramChatApiLoader",
"TelegramChatFileLoader",
"TelegramChatLoader",
"TensorflowDatasetLoader",
"TencentCOSDirectoryLoader",
"TencentCOSFileLoader",
"TextLoader",
"ToMarkdownLoader",
"TomlLoader",
"TrelloLoader",
"TwitterTweetLoader",
"UnstructuredAPIFileIOLoader",
"UnstructuredAPIFileLoader",
"UnstructuredCSVLoader",
"UnstructuredEPubLoader",
"UnstructuredEmailLoader",
"UnstructuredExcelLoader",
"UnstructuredFileIOLoader",
"UnstructuredFileLoader",
"UnstructuredHTMLLoader",
"UnstructuredImageLoader",
"UnstructuredMarkdownLoader",
"UnstructuredODTLoader",
"UnstructuredOrgModeLoader",
"UnstructuredPDFLoader",
"UnstructuredPowerPointLoader",
"UnstructuredRSTLoader",
"UnstructuredRTFLoader",
"UnstructuredTSVLoader",
"UnstructuredURLLoader",
"UnstructuredWordDocumentLoader",
"UnstructuredXMLLoader",
"WeatherDataLoader",
"WebBaseLoader",
"WhatsAppChatLoader",
"WikipediaLoader",
"XorbitsLoader",
"YoutubeAudioLoader",
"YoutubeLoader",
"YuqueLoader",
]
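All of the loaders above share the same `load()` contract: return a list of `Document` objects with `page_content` and `metadata`. A minimal, hedged sketch with the simplest of them, `TextLoader` (the file path is a placeholder):

```python
# Sketch: loading a plain-text file into Document objects with the community
# TextLoader that this module forwards to.
from langchain_community.document_loaders import TextLoader

loader = TextLoader("notes.txt", encoding="utf-8")  # placeholder path
docs = loader.load()  # list[Document], one Document per file
print(docs[0].metadata["source"], len(docs[0].page_content))
```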
| |
155071
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import TextLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"TextLoader": "langchain_community.document_loaders"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TextLoader",
]
| |
155171
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms import GooglePalm
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"GooglePalm": "langchain_community.llms"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GooglePalm",
]
| |
155255
|
from typing import List
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
def generate_example(
examples: List[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
) -> str:
"""Return another example given a list of examples for a prompt."""
prompt = FewShotPromptTemplate(
examples=examples,
suffix=TEST_GEN_TEMPLATE_SUFFIX,
input_variables=[],
example_prompt=prompt_template,
)
chain = prompt | llm | StrOutputParser()
return chain.invoke({})
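A usage sketch for `generate_example`: the few-shot prompt is rendered from the prior examples, the suffix asks the model to "Add another example.", and the raw completion string is returned. The chat model here is an assumption (`langchain-openai` installed); any `BaseLanguageModel` works.

```python
# Sketch: asking the model to extend a small set of Q/A examples.
from langchain_core.prompts.prompt import PromptTemplate
from langchain_openai import ChatOpenAI  # assumed; swap in any model

example_prompt = PromptTemplate.from_template("Question: {question}\nAnswer: {answer}")
examples = [
    {"question": "What is 2 + 2?", "answer": "4"},
    {"question": "What is the capital of France?", "answer": "Paris"},
]
llm = ChatOpenAI(temperature=0.7)

new_example = generate_example(examples, llm, example_prompt)
print(new_example)
```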
| |
155257
|
"""**Chains** are easily reusable components linked together.
Chains encode a sequence of calls to components like models, document retrievers,
other Chains, etc., and provide a simple interface to this sequence.
The Chain interface makes it easy to create apps that are:
- **Stateful:** add Memory to any Chain to give it state,
- **Observable:** pass Callbacks to a Chain to execute additional functionality,
like logging, outside the main sequence of component calls,
- **Composable:** combine Chains with other components, including other Chains.
**Class hierarchy:**
.. code-block::
Chain --> <name>Chain # Examples: LLMChain, MapReduceChain, RouterChain
"""
from typing import Any
from langchain._api import create_importer
_module_lookup = {
"APIChain": "langchain.chains.api.base",
"OpenAPIEndpointChain": "langchain_community.chains.openapi.chain",
"AnalyzeDocumentChain": "langchain.chains.combine_documents.base",
"MapReduceDocumentsChain": "langchain.chains.combine_documents.map_reduce",
"MapRerankDocumentsChain": "langchain.chains.combine_documents.map_rerank",
"ReduceDocumentsChain": "langchain.chains.combine_documents.reduce",
"RefineDocumentsChain": "langchain.chains.combine_documents.refine",
"StuffDocumentsChain": "langchain.chains.combine_documents.stuff",
"ConstitutionalChain": "langchain.chains.constitutional_ai.base",
"ConversationChain": "langchain.chains.conversation.base",
"ChatVectorDBChain": "langchain.chains.conversational_retrieval.base",
"ConversationalRetrievalChain": "langchain.chains.conversational_retrieval.base",
"generate_example": "langchain.chains.example_generator",
"FlareChain": "langchain.chains.flare.base",
"ArangoGraphQAChain": "langchain_community.chains.graph_qa.arangodb",
"GraphQAChain": "langchain_community.chains.graph_qa.base",
"GraphCypherQAChain": "langchain_community.chains.graph_qa.cypher",
"FalkorDBQAChain": "langchain_community.chains.graph_qa.falkordb",
"HugeGraphQAChain": "langchain_community.chains.graph_qa.hugegraph",
"KuzuQAChain": "langchain_community.chains.graph_qa.kuzu",
"NebulaGraphQAChain": "langchain_community.chains.graph_qa.nebulagraph",
"NeptuneOpenCypherQAChain": "langchain_community.chains.graph_qa.neptune_cypher",
"NeptuneSparqlQAChain": "langchain_community.chains.graph_qa.neptune_sparql",
"OntotextGraphDBQAChain": "langchain_community.chains.graph_qa.ontotext_graphdb",
"GraphSparqlQAChain": "langchain_community.chains.graph_qa.sparql",
"create_history_aware_retriever": "langchain.chains.history_aware_retriever",
"HypotheticalDocumentEmbedder": "langchain.chains.hyde.base",
"LLMChain": "langchain.chains.llm",
"LLMCheckerChain": "langchain.chains.llm_checker.base",
"LLMMathChain": "langchain.chains.llm_math.base",
"LLMRequestsChain": "langchain_community.chains.llm_requests",
"LLMSummarizationCheckerChain": "langchain.chains.llm_summarization_checker.base",
"load_chain": "langchain.chains.loading",
"MapReduceChain": "langchain.chains.mapreduce",
"OpenAIModerationChain": "langchain.chains.moderation",
"NatBotChain": "langchain.chains.natbot.base",
"create_citation_fuzzy_match_chain": "langchain.chains.openai_functions",
"create_citation_fuzzy_match_runnable": "langchain.chains.openai_functions",
"create_extraction_chain": "langchain.chains.openai_functions",
"create_extraction_chain_pydantic": "langchain.chains.openai_functions",
"create_qa_with_sources_chain": "langchain.chains.openai_functions",
"create_qa_with_structure_chain": "langchain.chains.openai_functions",
"create_tagging_chain": "langchain.chains.openai_functions",
"create_tagging_chain_pydantic": "langchain.chains.openai_functions",
"QAGenerationChain": "langchain.chains.qa_generation.base",
"QAWithSourcesChain": "langchain.chains.qa_with_sources.base",
"RetrievalQAWithSourcesChain": "langchain.chains.qa_with_sources.retrieval",
"VectorDBQAWithSourcesChain": "langchain.chains.qa_with_sources.vector_db",
"create_retrieval_chain": "langchain.chains.retrieval",
"RetrievalQA": "langchain.chains.retrieval_qa.base",
"VectorDBQA": "langchain.chains.retrieval_qa.base",
"LLMRouterChain": "langchain.chains.router",
"MultiPromptChain": "langchain.chains.router",
"MultiRetrievalQAChain": "langchain.chains.router",
"MultiRouteChain": "langchain.chains.router",
"RouterChain": "langchain.chains.router",
"SequentialChain": "langchain.chains.sequential",
"SimpleSequentialChain": "langchain.chains.sequential",
"create_sql_query_chain": "langchain.chains.sql_database.query",
"create_structured_output_runnable": "langchain.chains.structured_output",
"load_summarize_chain": "langchain.chains.summarize",
"TransformChain": "langchain.chains.transform",
}
importer = create_importer(__package__, module_lookup=_module_lookup)
def __getattr__(name: str) -> Any:
return importer(name)
__all__ = list(_module_lookup.keys())
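The `_module_lookup` / `__getattr__` pair above implements lazy imports via PEP 562: a name is only imported from its backing module on first attribute access, so optional or heavy dependencies are never loaded eagerly. The generic sketch below shows the core of that pattern; it deliberately omits the deprecation warnings that `create_importer` also emits, and the single-entry lookup table is illustrative.

```python
# Sketch of the module-level lazy-import pattern used throughout this package.
from importlib import import_module
from typing import Any

_module_lookup = {"LLMChain": "langchain.chains.llm"}

def __getattr__(name: str) -> Any:
    if name in _module_lookup:
        module = import_module(_module_lookup[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```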
| |
155259
|
class LLMChain(Chain):
"""Chain to run queries against LLMs.
This class is deprecated. See below for an example implementation using
LangChain runnables:
.. code-block:: python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI
prompt_template = "Tell me a {adjective} joke"
prompt = PromptTemplate(
input_variables=["adjective"], template=prompt_template
)
llm = OpenAI()
chain = prompt | llm | StrOutputParser()
chain.invoke("your adjective here")
Example:
.. code-block:: python
from langchain.chains import LLMChain
from langchain_community.llms import OpenAI
from langchain_core.prompts import PromptTemplate
prompt_template = "Tell me a {adjective} joke"
prompt = PromptTemplate(
input_variables=["adjective"], template=prompt_template
)
llm = LLMChain(llm=OpenAI(), prompt=prompt)
"""
@classmethod
def is_lc_serializable(cls) -> bool:
return True
prompt: BasePromptTemplate
"""Prompt object to use."""
llm: Union[
Runnable[LanguageModelInput, str], Runnable[LanguageModelInput, BaseMessage]
]
"""Language model to call."""
output_key: str = "text" #: :meta private:
output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)
"""Output parser to use.
Defaults to one that takes the most likely string but does not change it
otherwise."""
return_final_only: bool = True
"""Whether to return only the final parsed result. Defaults to True.
If False, additional information about the generation is also returned."""
llm_kwargs: dict = Field(default_factory=dict)
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the prompt expects.
:meta private:
"""
return self.prompt.input_variables
@property
def output_keys(self) -> List[str]:
"""Will always return text key.
:meta private:
"""
if self.return_final_only:
return [self.output_key]
else:
return [self.output_key, "full_generation"]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
response = self.generate([inputs], run_manager=run_manager)
return self.create_outputs(response)[0]
def generate(
self,
input_list: List[Dict[str, Any]],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> LLMResult:
"""Generate LLM result from inputs."""
prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
callbacks = run_manager.get_child() if run_manager else None
if isinstance(self.llm, BaseLanguageModel):
return self.llm.generate_prompt(
prompts,
stop,
callbacks=callbacks,
**self.llm_kwargs,
)
else:
results = self.llm.bind(stop=stop, **self.llm_kwargs).batch(
cast(List, prompts), {"callbacks": callbacks}
)
generations: List[List[Generation]] = []
for res in results:
if isinstance(res, BaseMessage):
generations.append([ChatGeneration(message=res)])
else:
generations.append([Generation(text=res)])
return LLMResult(generations=generations)
async def agenerate(
self,
input_list: List[Dict[str, Any]],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> LLMResult:
"""Generate LLM result from inputs."""
prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager)
callbacks = run_manager.get_child() if run_manager else None
if isinstance(self.llm, BaseLanguageModel):
return await self.llm.agenerate_prompt(
prompts,
stop,
callbacks=callbacks,
**self.llm_kwargs,
)
else:
results = await self.llm.bind(stop=stop, **self.llm_kwargs).abatch(
cast(List, prompts), {"callbacks": callbacks}
)
generations: List[List[Generation]] = []
for res in results:
if isinstance(res, BaseMessage):
generations.append([ChatGeneration(message=res)])
else:
generations.append([Generation(text=res)])
return LLMResult(generations=generations)
def prep_prompts(
self,
input_list: List[Dict[str, Any]],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Tuple[List[PromptValue], Optional[List[str]]]:
"""Prepare prompts from inputs."""
stop = None
if len(input_list) == 0:
return [], stop
if "stop" in input_list[0]:
stop = input_list[0]["stop"]
prompts = []
for inputs in input_list:
selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
prompt = self.prompt.format_prompt(**selected_inputs)
_colored_text = get_colored_text(prompt.to_string(), "green")
_text = "Prompt after formatting:\n" + _colored_text
if run_manager:
run_manager.on_text(_text, end="\n", verbose=self.verbose)
if "stop" in inputs and inputs["stop"] != stop:
raise ValueError(
"If `stop` is present in any inputs, it should be present in all."
)
prompts.append(prompt)
return prompts, stop
async def aprep_prompts(
self,
input_list: List[Dict[str, Any]],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Tuple[List[PromptValue], Optional[List[str]]]:
"""Prepare prompts from inputs."""
stop = None
if len(input_list) == 0:
return [], stop
if "stop" in input_list[0]:
stop = input_list[0]["stop"]
prompts = []
for inputs in input_list:
selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
prompt = self.prompt.format_prompt(**selected_inputs)
_colored_text = get_colored_text(prompt.to_string(), "green")
_text = "Prompt after formatting:\n" + _colored_text
if run_manager:
await run_manager.on_text(_text, end="\n", verbose=self.verbose)
if "stop" in inputs and inputs["stop"] != stop:
raise ValueError(
"If `stop` is present in any inputs, it should be present in all."
)
prompts.append(prompt)
return prompts, stop
def apply(
self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
) -> List[Dict[str, str]]:
"""Utilize the LLM generate method for speed gains."""
callback_manager = CallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
run_manager = callback_manager.on_chain_start(
None,
{"input_list": input_list},
name=self.get_name(),
)
try:
response = self.generate(input_list, run_manager=run_manager)
except BaseException as e:
run_manager.on_chain_error(e)
raise e
outputs = self.create_outputs(response)
run_manager.on_chain_end({"outputs": outputs})
return outputs
async def aapply(
self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
) -> List[Dict[str, str]]:
"""Utilize the LLM generate method for speed gains."""
callback_manager = AsyncCallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
run_manager = await callback_manager.on_chain_start(
None,
{"input_list": input_list},
name=self.get_name(),
)
try:
response = await self.agenerate(input_list, run_manager=run_manager)
except BaseException as e:
await run_manager.on_chain_error(e)
raise e
outputs = self.create_outputs(response)
await run_manager.on_chain_end({"outputs": outputs})
return outputs
@property
def _run_output_key(self) -> str:
return self.output_key
def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]:
"""Create outputs from response."""
result = [
# Get the text of the top generated string.
{
self.output_key: self.output_parser.parse_result(generation),
"full_generation": generation,
}
for generation in llm_result.generations
]
if self.return_final_only:
result = [{self.output_key: r[self.output_key]} for r in result]
return result
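The `output_keys` / `create_outputs` logic above means a caller can opt into the raw generations. For code still on the deprecated `LLMChain`, here is a hedged sketch; it assumes `langchain-openai` is installed, and the prompt is illustrative.

```python
# Sketch: inspecting the raw generations that LLMChain keeps when
# return_final_only=False.
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI  # assumed; any LLM or chat model works

prompt = PromptTemplate.from_template("Tell me a {adjective} joke")
chain = LLMChain(llm=ChatOpenAI(), prompt=prompt, return_final_only=False)

result = chain.invoke({"adjective": "dry"})
print(result["text"])             # parsed output (output_key defaults to "text")
print(result["full_generation"])  # list[Generation] with raw model metadata
```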
| |
155260
|
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
response = await self.agenerate([inputs], run_manager=run_manager)
return self.create_outputs(response)[0]
def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
"""Format prompt with kwargs and pass to LLM.
Args:
callbacks: Callbacks to pass to LLMChain
**kwargs: Keys to pass to prompt template.
Returns:
Completion from LLM.
Example:
.. code-block:: python
completion = llm.predict(adjective="funny")
"""
return self(kwargs, callbacks=callbacks)[self.output_key]
async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
"""Format prompt with kwargs and pass to LLM.
Args:
callbacks: Callbacks to pass to LLMChain
**kwargs: Keys to pass to prompt template.
Returns:
Completion from LLM.
Example:
.. code-block:: python
completion = llm.predict(adjective="funny")
"""
return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]
def predict_and_parse(
self, callbacks: Callbacks = None, **kwargs: Any
) -> Union[str, List[str], Dict[str, Any]]:
"""Call predict and then parse the results."""
warnings.warn(
"The predict_and_parse method is deprecated, "
"instead pass an output parser directly to LLMChain."
)
result = self.predict(callbacks=callbacks, **kwargs)
if self.prompt.output_parser is not None:
return self.prompt.output_parser.parse(result)
else:
return result
async def apredict_and_parse(
self, callbacks: Callbacks = None, **kwargs: Any
) -> Union[str, List[str], Dict[str, str]]:
"""Call apredict and then parse the results."""
warnings.warn(
"The apredict_and_parse method is deprecated, "
"instead pass an output parser directly to LLMChain."
)
result = await self.apredict(callbacks=callbacks, **kwargs)
if self.prompt.output_parser is not None:
return self.prompt.output_parser.parse(result)
else:
return result
def apply_and_parse(
self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
) -> Sequence[Union[str, List[str], Dict[str, str]]]:
"""Call apply and then parse the results."""
warnings.warn(
"The apply_and_parse method is deprecated, "
"instead pass an output parser directly to LLMChain."
)
result = self.apply(input_list, callbacks=callbacks)
return self._parse_generation(result)
def _parse_generation(
self, generation: List[Dict[str, str]]
) -> Sequence[Union[str, List[str], Dict[str, str]]]:
if self.prompt.output_parser is not None:
return [
self.prompt.output_parser.parse(res[self.output_key])
for res in generation
]
else:
return generation
async def aapply_and_parse(
self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
) -> Sequence[Union[str, List[str], Dict[str, str]]]:
"""Call apply and then parse the results."""
warnings.warn(
"The aapply_and_parse method is deprecated, "
"instead pass an output parser directly to LLMChain."
)
result = await self.aapply(input_list, callbacks=callbacks)
return self._parse_generation(result)
@property
def _chain_type(self) -> str:
return "llm_chain"
@classmethod
def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain:
"""Create LLMChain from LLM and template."""
prompt_template = PromptTemplate.from_template(template)
return cls(llm=llm, prompt=prompt_template)
def _get_num_tokens(self, text: str) -> int:
return _get_language_model(self.llm).get_num_tokens(text)
def _get_language_model(llm_like: Runnable) -> BaseLanguageModel:
if isinstance(llm_like, BaseLanguageModel):
return llm_like
elif isinstance(llm_like, RunnableBinding):
return _get_language_model(llm_like.bound)
elif isinstance(llm_like, RunnableWithFallbacks):
return _get_language_model(llm_like.runnable)
elif isinstance(llm_like, (RunnableBranch, DynamicRunnable)):
return _get_language_model(llm_like.default)
else:
raise ValueError(
f"Unable to extract BaseLanguageModel from llm_like object of type "
f"{type(llm_like)}"
)
| |
155264
|
from __future__ import annotations
from typing import Any, Dict, Union
from langchain_core.retrievers import (
BaseRetriever,
RetrieverOutput,
)
from langchain_core.runnables import Runnable, RunnablePassthrough
def create_retrieval_chain(
retriever: Union[BaseRetriever, Runnable[dict, RetrieverOutput]],
combine_docs_chain: Runnable[Dict[str, Any], str],
) -> Runnable:
"""Create retrieval chain that retrieves documents and then passes them on.
Args:
retriever: Retriever-like object that returns list of documents. Should
either be a subclass of BaseRetriever or a Runnable that returns
a list of documents. If a subclass of BaseRetriever, then it
is expected that an `input` key be passed in - this is what
will be passed into the retriever. If this is NOT a
subclass of BaseRetriever, then all the inputs will be passed
into this runnable, meaning that runnable should take a dictionary
as input.
combine_docs_chain: Runnable that takes inputs and produces a string output.
The inputs to this will be any original inputs to this chain, a new
context key with the retrieved documents, and chat_history (if not present
in the inputs) with a value of `[]` (to easily enable conversational
retrieval).
Returns:
An LCEL Runnable. The Runnable return is a dictionary containing at the very
least a `context` and `answer` key.
Example:
.. code-block:: python
# pip install -U langchain langchain-community
from langchain_community.chat_models import ChatOpenAI
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
from langchain import hub
retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
llm = ChatOpenAI()
retriever = ...
combine_docs_chain = create_stuff_documents_chain(
llm, retrieval_qa_chat_prompt
)
retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)
retrieval_chain.invoke({"input": "..."})
"""
if not isinstance(retriever, BaseRetriever):
retrieval_docs: Runnable[dict, RetrieverOutput] = retriever
else:
retrieval_docs = (lambda x: x["input"]) | retriever
retrieval_chain = (
RunnablePassthrough.assign(
context=retrieval_docs.with_config(run_name="retrieve_documents"),
).assign(answer=combine_docs_chain)
).with_config(run_name="retrieval_chain")
return retrieval_chain
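The branch at the end of the function accepts two retriever shapes, which the hedged sketch below makes explicit: a `BaseRetriever` only receives `inputs["input"]`, while an arbitrary Runnable receives the whole input dict. The components are placeholders, following the docstring's own convention.

```python
# Sketch: the two retriever shapes create_retrieval_chain accepts.
from langchain_core.runnables import RunnableLambda

combine_docs_chain = ...  # e.g. create_stuff_documents_chain(llm, prompt)
retriever = ...           # a BaseRetriever

# 1. BaseRetriever: the chain wires (lambda x: x["input"]) | retriever for you.
chain = create_retrieval_chain(retriever, combine_docs_chain)

# 2. Arbitrary Runnable: receives the full input dict, so you can route on more
#    than just "input" (for example, filter by a tenant id before retrieving).
custom_retrieval = RunnableLambda(
    lambda inputs: retriever.invoke(inputs["input"])  # add your own filtering here
)
chain_with_custom_retrieval = create_retrieval_chain(custom_retrieval, combine_docs_chain)

result = chain.invoke({"input": "What changed in the last release?"})
print(result["answer"])
```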
| |
155267
|
class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
"""Abstract base class for creating structured sequences of calls to components.
Chains should be used to encode a sequence of calls to components like
models, document retrievers, other chains, etc., and provide a simple interface
to this sequence.
The Chain interface makes it easy to create apps that are:
- Stateful: add Memory to any Chain to give it state,
- Observable: pass Callbacks to a Chain to execute additional functionality,
like logging, outside the main sequence of component calls,
- Composable: the Chain API is flexible enough that it is easy to combine
Chains with other components, including other Chains.
The main methods exposed by chains are:
- `__call__`: Chains are callable. The `__call__` method is the primary way to
execute a Chain. This takes inputs as a dictionary and returns a
dictionary output.
- `run`: A convenience method that takes inputs as args/kwargs and returns the
output as a string or object. This method can only be used for a subset of
chains and cannot return as rich an output as `__call__`.
"""
memory: Optional[BaseMemory] = None
"""Optional memory object. Defaults to None.
Memory is a class that gets called at the start
and at the end of every chain. At the start, memory loads variables and passes
them along in the chain. At the end, it saves any returned variables.
There are many different types of memory - please see memory docs
for the full catalog."""
callbacks: Callbacks = Field(default=None, exclude=True)
"""Optional list of callback handlers (or callback manager). Defaults to None.
Callback handlers are called throughout the lifecycle of a call to a chain,
starting with on_chain_start, ending with on_chain_end or on_chain_error.
Each custom chain can optionally call additional callback methods, see Callback docs
for full details."""
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether or not run in verbose mode. In verbose mode, some intermediate logs
will be printed to the console. Defaults to the global `verbose` value,
accessible via `langchain.globals.get_verbose()`."""
tags: Optional[List[str]] = None
"""Optional list of tags associated with the chain. Defaults to None.
These tags will be associated with each call to this chain,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to, e.g., identify a specific instance of a chain with its use case.
"""
metadata: Optional[Dict[str, Any]] = None
"""Optional metadata associated with the chain. Defaults to None.
This metadata will be associated with each call to this chain,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to, e.g., identify a specific instance of a chain with its use case.
"""
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
"""[DEPRECATED] Use `callbacks` instead."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
# This is correct, but pydantic typings/mypy don't think so.
return create_model( # type: ignore[call-overload]
"ChainInput", **{k: (Any, None) for k in self.input_keys}
)
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
# This is correct, but pydantic typings/mypy don't think so.
return create_model( # type: ignore[call-overload]
"ChainOutput", **{k: (Any, None) for k in self.output_keys}
)
def invoke(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Dict[str, Any]:
config = ensure_config(config)
callbacks = config.get("callbacks")
tags = config.get("tags")
metadata = config.get("metadata")
run_name = config.get("run_name") or self.get_name()
run_id = config.get("run_id")
include_run_info = kwargs.get("include_run_info", False)
return_only_outputs = kwargs.get("return_only_outputs", False)
inputs = self.prep_inputs(input)
callback_manager = CallbackManager.configure(
callbacks,
self.callbacks,
self.verbose,
tags,
self.tags,
metadata,
self.metadata,
)
new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
run_manager = callback_manager.on_chain_start(
None,
inputs,
run_id,
name=run_name,
)
try:
self._validate_inputs(inputs)
outputs = (
self._call(inputs, run_manager=run_manager)
if new_arg_supported
else self._call(inputs)
)
final_outputs: Dict[str, Any] = self.prep_outputs(
inputs, outputs, return_only_outputs
)
except BaseException as e:
run_manager.on_chain_error(e)
raise e
run_manager.on_chain_end(outputs)
if include_run_info:
final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return final_outputs
async def ainvoke(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Dict[str, Any]:
config = ensure_config(config)
callbacks = config.get("callbacks")
tags = config.get("tags")
metadata = config.get("metadata")
run_name = config.get("run_name") or self.get_name()
run_id = config.get("run_id")
include_run_info = kwargs.get("include_run_info", False)
return_only_outputs = kwargs.get("return_only_outputs", False)
inputs = await self.aprep_inputs(input)
callback_manager = AsyncCallbackManager.configure(
callbacks,
self.callbacks,
self.verbose,
tags,
self.tags,
metadata,
self.metadata,
)
new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
run_manager = await callback_manager.on_chain_start(
None,
inputs,
run_id,
name=run_name,
)
try:
self._validate_inputs(inputs)
outputs = (
await self._acall(inputs, run_manager=run_manager)
if new_arg_supported
else await self._acall(inputs)
)
final_outputs: Dict[str, Any] = await self.aprep_outputs(
inputs, outputs, return_only_outputs
)
except BaseException as e:
await run_manager.on_chain_error(e)
raise e
await run_manager.on_chain_end(outputs)
if include_run_info:
final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return final_outputs
@property
def _chain_type(self) -> str:
raise NotImplementedError("Saving not supported for this chain type.")
@model_validator(mode="before")
@classmethod
def raise_callback_manager_deprecation(cls, values: Dict) -> Any:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
if values.get("callbacks") is not None:
raise ValueError(
"Cannot specify both callback_manager and callbacks. "
"callback_manager is deprecated, callbacks is the preferred "
"parameter to pass in."
)
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@field_validator("verbose", mode="before")
@classmethod
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""Set the chain verbosity.
Defaults to the global setting if not specified by the user.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@property
@abstractmethod
def input_keys(self) -> List[str]:
"""Keys expected to be in the chain input."""
@property
@abstractmethod
def output_keys(self) -> List[str]:
"""Keys expected to be in the chain output."""
155268
def _validate_inputs(self, inputs: Dict[str, Any]) -> None:
"""Check that all inputs are present."""
if not isinstance(inputs, dict):
_input_keys = set(self.input_keys)
if self.memory is not None:
# If there are multiple input keys, but some get set by memory so that
# only one is not set, we can still figure out which key it is.
_input_keys = _input_keys.difference(self.memory.memory_variables)
if len(_input_keys) != 1:
raise ValueError(
f"A single string input was passed in, but this chain expects "
f"multiple inputs ({_input_keys}). When a chain expects "
f"multiple inputs, please call it by passing in a dictionary, "
"eg `chain({'foo': 1, 'bar': 2})`"
)
missing_keys = set(self.input_keys).difference(inputs)
if missing_keys:
raise ValueError(f"Missing some input keys: {missing_keys}")
def _validate_outputs(self, outputs: Dict[str, Any]) -> None:
missing_keys = set(self.output_keys).difference(outputs)
if missing_keys:
raise ValueError(f"Missing some output keys: {missing_keys}")
@abstractmethod
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Execute the chain.
This is a private method that is not user-facing. It is only called within
`Chain.__call__`, which is the user-facing wrapper method that handles
callbacks configuration and some input/output processing.
Args:
inputs: A dict of named inputs to the chain. Assumed to contain all inputs
specified in `Chain.input_keys`, including any inputs added by memory.
run_manager: The callbacks manager that contains the callback handlers for
this run of the chain.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
"""
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Asynchronously execute the chain.
This is a private method that is not user-facing. It is only called within
`Chain.acall`, which is the user-facing wrapper method that handles
callbacks configuration and some input/output processing.
Args:
inputs: A dict of named inputs to the chain. Assumed to contain all inputs
specified in `Chain.input_keys`, including any inputs added by memory.
run_manager: The callbacks manager that contains the callback handlers for
this run of the chain.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
"""
return await run_in_executor(
None, self._call, inputs, run_manager.get_sync() if run_manager else None
)
@deprecated("0.1.0", alternative="invoke", removal="1.0")
def __call__(
self,
inputs: Union[Dict[str, Any], Any],
return_only_outputs: bool = False,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
run_name: Optional[str] = None,
include_run_info: bool = False,
) -> Dict[str, Any]:
"""Execute the chain.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param. Should contain all inputs specified in
`Chain.input_keys` except for inputs that will be set by the chain's
memory.
return_only_outputs: Whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
metadata: Optional metadata associated with the chain. Defaults to None.
run_name: Optional name for the tracing run. Defaults to the chain's name.
include_run_info: Whether to include run info in the response. Defaults
to False.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
"""
config = {
"callbacks": callbacks,
"tags": tags,
"metadata": metadata,
"run_name": run_name,
}
return self.invoke(
inputs,
cast(RunnableConfig, {k: v for k, v in config.items() if v is not None}),
return_only_outputs=return_only_outputs,
include_run_info=include_run_info,
)
@deprecated("0.1.0", alternative="ainvoke", removal="1.0")
async def acall(
self,
inputs: Union[Dict[str, Any], Any],
return_only_outputs: bool = False,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
run_name: Optional[str] = None,
include_run_info: bool = False,
) -> Dict[str, Any]:
"""Asynchronously execute the chain.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param. Should contain all inputs specified in
`Chain.input_keys` except for inputs that will be set by the chain's
memory.
return_only_outputs: Whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
metadata: Optional metadata associated with the chain. Defaults to None.
run_name: Optional name for the tracing run. Defaults to the chain's name.
include_run_info: Whether to include run info in the response. Defaults
to False.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
"""
config = {
"callbacks": callbacks,
"tags": tags,
"metadata": metadata,
"run_name": run_name,
}
return await self.ainvoke(
inputs,
cast(RunnableConfig, {k: v for k, v in config.items() if v is not None}),
return_only_outputs=return_only_outputs,
include_run_info=include_run_info,
)
def prep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
"""Validate and prepare chain outputs, and save info about this run to memory.
Args:
inputs: Dictionary of chain inputs, including any inputs added by chain
memory.
outputs: Dictionary of initial chain outputs.
return_only_outputs: Whether to only return the chain outputs. If False,
inputs are also added to the final outputs.
Returns:
A dict of the final chain outputs.
"""
self._validate_outputs(outputs)
if self.memory is not None:
self.memory.save_context(inputs, outputs)
if return_only_outputs:
return outputs
else:
return {**inputs, **outputs}
async def aprep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
"""Validate and prepare chain outputs, and save info about this run to memory.
Args:
inputs: Dictionary of chain inputs, including any inputs added by chain
memory.
outputs: Dictionary of initial chain outputs.
return_only_outputs: Whether to only return the chain outputs. If False,
inputs are also added to the final outputs.
Returns:
A dict of the final chain outputs.
"""
self._validate_outputs(outputs)
if self.memory is not None:
await self.memory.asave_context(inputs, outputs)
if return_only_outputs:
return outputs
else:
return {**inputs, **outputs}
155269
def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
"""Prepare chain inputs, including adding inputs from memory.
Args:
inputs: Dictionary of raw inputs, or single input if chain expects
only one param. Should contain all inputs specified in
`Chain.input_keys` except for inputs that will be set by the chain's
memory.
Returns:
A dictionary of all inputs, including those added by the chain's memory.
"""
if not isinstance(inputs, dict):
_input_keys = set(self.input_keys)
if self.memory is not None:
# If there are multiple input keys, but some get set by memory so that
# only one is not set, we can still figure out which key it is.
_input_keys = _input_keys.difference(self.memory.memory_variables)
inputs = {list(_input_keys)[0]: inputs}
if self.memory is not None:
external_context = self.memory.load_memory_variables(inputs)
inputs = dict(inputs, **external_context)
return inputs
async def aprep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
"""Prepare chain inputs, including adding inputs from memory.
Args:
inputs: Dictionary of raw inputs, or single input if chain expects
only one param. Should contain all inputs specified in
`Chain.input_keys` except for inputs that will be set by the chain's
memory.
Returns:
A dictionary of all inputs, including those added by the chain's memory.
"""
if not isinstance(inputs, dict):
_input_keys = set(self.input_keys)
if self.memory is not None:
# If there are multiple input keys, but some get set by memory so that
# only one is not set, we can still figure out which key it is.
_input_keys = _input_keys.difference(self.memory.memory_variables)
inputs = {list(_input_keys)[0]: inputs}
if self.memory is not None:
external_context = await self.memory.aload_memory_variables(inputs)
inputs = dict(inputs, **external_context)
return inputs
@property
def _run_output_key(self) -> str:
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
return self.output_keys[0]
@deprecated("0.1.0", alternative="invoke", removal="1.0")
def run(
self,
*args: Any,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
"""Convenience method for executing chain.
The main difference between this method and `Chain.__call__` is that this
method expects inputs to be passed directly in as positional arguments or
keyword arguments, whereas `Chain.__call__` expects a single input dictionary
with all the inputs.
Args:
*args: If the chain expects a single input, it can be passed in as the
sole positional argument.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
**kwargs: If the chain expects multiple inputs, they can be passed in
directly as keyword arguments.
Returns:
The chain output.
Example:
.. code-block:: python
# Suppose we have a single-input chain that takes a 'question' string:
chain.run("What's the temperature in Boise, Idaho?")
# -> "The temperature in Boise is..."
# Suppose we have a multi-input chain that takes a 'question' string
# and 'context' string:
question = "What's the temperature in Boise, Idaho?"
context = "Weather report for Boise, Idaho on 07/03/23..."
chain.run(question=question, context=context)
# -> "The temperature in Boise is..."
"""
# Run at start to make sure this is possible/defined
_output_key = self._run_output_key
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[
_output_key
]
if kwargs and not args:
return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
_output_key
]
if not kwargs and not args:
raise ValueError(
"`run` supported with either positional arguments or keyword arguments,"
" but none were provided."
)
else:
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
@deprecated("0.1.0", alternative="ainvoke", removal="1.0")
async def arun(
self,
*args: Any,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
"""Convenience method for executing chain.
The main difference between this method and `Chain.__call__` is that this
method expects inputs to be passed directly in as positional arguments or
keyword arguments, whereas `Chain.__call__` expects a single input dictionary
with all the inputs.
Args:
*args: If the chain expects a single input, it can be passed in as the
sole positional argument.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
**kwargs: If the chain expects multiple inputs, they can be passed in
directly as keyword arguments.
Returns:
The chain output.
Example:
.. code-block:: python
# Suppose we have a single-input chain that takes a 'question' string:
await chain.arun("What's the temperature in Boise, Idaho?")
# -> "The temperature in Boise is..."
# Suppose we have a multi-input chain that takes a 'question' string
# and 'context' string:
question = "What's the temperature in Boise, Idaho?"
context = "Weather report for Boise, Idaho on 07/03/23..."
await chain.arun(question=question, context=context)
# -> "The temperature in Boise is..."
"""
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
elif args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return (
await self.acall(
args[0], callbacks=callbacks, tags=tags, metadata=metadata
)
)[self.output_keys[0]]
if kwargs and not args:
return (
await self.acall(
kwargs, callbacks=callbacks, tags=tags, metadata=metadata
)
)[self.output_keys[0]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
def dict(self, **kwargs: Any) -> Dict:
"""Dictionary representation of chain.
Expects `Chain._chain_type` property to be implemented and for memory to be
null.
Args:
**kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict`
method.
Returns:
A dictionary representation of the chain.
Example:
.. code-block:: python
chain.dict(exclude_unset=True)
# -> {"_type": "foo", "verbose": False, ...}
"""
_dict = super().dict(**kwargs)
try:
_dict["_type"] = self._chain_type
except NotImplementedError:
pass
return _dict
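# Illustrative usage sketch: a minimal concrete subclass of the abstract
# ``Chain`` above, showing the three members every chain must provide
# (``input_keys``, ``output_keys``, ``_call``). The class name and its keys
# are hypothetical.
from typing import Any, Dict, List, Optional

from langchain_core.callbacks import CallbackManagerForChainRun


class GreetingChain(Chain):
    """Toy chain that greets a single named input."""

    @property
    def input_keys(self) -> List[str]:
        return ["name"]

    @property
    def output_keys(self) -> List[str]:
        return ["greeting"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        # Callback and memory handling is done by ``invoke``; only the core
        # logic lives here.
        return {"greeting": f"Hello, {inputs['name']}!"}


# GreetingChain().invoke({"name": "world"})
# -> {"name": "world", "greeting": "Hello, world!"}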
155273
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:"""
QA_PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
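# Illustrative usage sketch: both prompts are plain ``PromptTemplate`` objects,
# so formatting them shows the exact strings sent to the model. The chat
# history, questions and context below are made up.
_example_condense = CONDENSE_QUESTION_PROMPT.format(
    chat_history="Human: Who wrote Dune?\nAI: Frank Herbert.",
    question="When was it published?",
)
_example_qa = QA_PROMPT.format(
    context="Dune was first published in 1965.",
    question="When was Dune published?",
)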
155283
# flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
system_template = """Use the following pieces of context to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
{context}"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
CHAT_PROMPT = ChatPromptTemplate.from_messages(messages)
PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)]
)
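# Illustrative usage sketch: ``PROMPT_SELECTOR`` returns ``CHAT_PROMPT`` for
# chat models and falls back to the completion-style ``PROMPT`` otherwise.
# The fake models below are assumed to be exported by
# ``langchain_core.language_models``; any real model works the same way.
from langchain_core.language_models import FakeListChatModel, FakeListLLM

assert PROMPT_SELECTOR.get_prompt(FakeListChatModel(responses=["ok"])) is CHAT_PROMPT
assert PROMPT_SELECTOR.get_prompt(FakeListLLM(responses=["ok"])) is PROMPT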
155287
from typing import Any, List, Optional, Type, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_functions import (
OutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
class AnswerWithSources(BaseModel):
"""An answer to the question, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
sources: List[str] = Field(
..., description="List of sources used to answer the question"
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with structured responses: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, Type[BaseModel]],
output_parser: str = "base",
prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
verbose: bool = False,
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
output_parser: Output parser to use. Should be one of `pydantic` or `base`.
Defaults to `base`.
prompt: Optional prompt to use for the chain.
verbose: Whether to run the chain in verbose mode.
Returns:
An LLMChain that can be used to answer questions with sources,
structured according to the provided schema.
"""
if output_parser == "pydantic":
if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
raise ValueError(
"Must provide a pydantic class for schema when output_parser is "
"'pydantic'."
)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema
)
elif output_parser == "base":
_output_parser = OutputFunctionsParser()
else:
raise ValueError(
f"Got unexpected output_parser: {output_parser}. "
f"Should be one of `pydantic` or `base`."
)
if isinstance(schema, type) and is_basemodel_subclass(schema):
if hasattr(schema, "model_json_schema"):
schema_dict = cast(dict, schema.model_json_schema())
else:
schema_dict = cast(dict, schema.schema())
else:
schema_dict = cast(dict, schema)
function = {
"name": schema_dict["title"],
"description": schema_dict["description"],
"parameters": schema_dict,
}
llm_kwargs = get_llm_kwargs(function)
messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
HumanMessage(content="Answer question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = prompt or ChatPromptTemplate(messages=messages) # type: ignore[arg-type, call-arg]
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=_output_parser,
verbose=verbose,
)
return chain
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_sources_chain(
llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources.
Args:
llm: Language model to use for the chain.
verbose: Whether to print the details of the chain.
**kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.
Returns:
Chain (LLMChain) that can be used to answer questions with citations.
"""
return create_qa_with_structure_chain(
llm, AnswerWithSources, verbose=verbose, **kwargs
)
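# Illustrative usage sketch (both helpers above are deprecated, as noted).
# The chat-model import and model name are assumptions; any model exposing the
# OpenAI function-calling API should behave similarly.
#
#     from langchain_openai import ChatOpenAI
#
#     llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
#     qa_chain = create_qa_with_sources_chain(llm)
#     qa_chain.run(
#         context="Dune was written by Frank Herbert and published in 1965.",
#         question="Who wrote Dune?",
#     )
#     # With the default ``base`` parser the result is the function-call
#     # arguments as a JSON string; with output_parser="pydantic" it is an
#     # ``AnswerWithSources`` instance.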
155299
# flake8: noqa
from langchain_core.output_parsers.list import CommaSeparatedListOutputParser
from langchain_core.prompts.prompt import PromptTemplate
PROMPT_SUFFIX = """Only use the following tables:
{table_info}
Question: {input}"""
_DEFAULT_TEMPLATE = """Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table; only ask for the few relevant columns given the question.
Pay attention to use only the column names that you can see in the schema description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
PROMPT = PromptTemplate(
input_variables=["input", "table_info", "dialect", "top_k"],
template=_DEFAULT_TEMPLATE + PROMPT_SUFFIX,
)
_DECIDER_TEMPLATE = """Given the below input question and list of potential tables, output a comma separated list of the table names that may be necessary to answer this question.
Question: {query}
Table Names: {table_names}
Relevant Table Names:"""
DECIDER_PROMPT = PromptTemplate(
input_variables=["query", "table_names"],
template=_DECIDER_TEMPLATE,
output_parser=CommaSeparatedListOutputParser(),
)
_cratedb_prompt = """You are a CrateDB expert. Given an input question, first create a syntactically correct CrateDB query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per CrateDB. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use CURRENT_DATE function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
CRATEDB_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_cratedb_prompt + PROMPT_SUFFIX,
)
_duckdb_prompt = """You are a DuckDB expert. Given an input question, first create a syntactically correct DuckDB query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per DuckDB. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use today() function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
DUCKDB_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_duckdb_prompt + PROMPT_SUFFIX,
)
_googlesql_prompt = """You are a GoogleSQL expert. Given an input question, first create a syntactically correct GoogleSQL query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per GoogleSQL. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in backticks (`) to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use CURRENT_DATE() function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
GOOGLESQL_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_googlesql_prompt + PROMPT_SUFFIX,
)
_mssql_prompt = """You are an MS SQL expert. Given an input question, first create a syntactically correct MS SQL query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the TOP clause as per MS SQL. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in square brackets ([]) to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use CAST(GETDATE() as date) function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
MSSQL_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_mssql_prompt + PROMPT_SUFFIX,
)
_mysql_prompt = """You are a MySQL expert. Given an input question, first create a syntactically correct MySQL query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per MySQL. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in backticks (`) to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use CURDATE() function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
MYSQL_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_mysql_prompt + PROMPT_SUFFIX,
)
_mariadb_prompt = """You are a MariaDB expert. Given an input question, first create a syntactically correct MariaDB query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per MariaDB. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in backticks (`) to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use CURDATE() function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
MARIADB_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_mariadb_prompt + PROMPT_SUFFIX,
)
155300
_oracle_prompt = """You are an Oracle SQL expert. Given an input question, first create a syntactically correct Oracle SQL query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the FETCH FIRST n ROWS ONLY clause as per Oracle SQL. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use TRUNC(SYSDATE) function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
ORACLE_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_oracle_prompt + PROMPT_SUFFIX,
)
_postgres_prompt = """You are a PostgreSQL expert. Given an input question, first create a syntactically correct PostgreSQL query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per PostgreSQL. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use CURRENT_DATE function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
POSTGRES_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_postgres_prompt + PROMPT_SUFFIX,
)
_sqlite_prompt = """You are a SQLite expert. Given an input question, first create a syntactically correct SQLite query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per SQLite. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use date('now') function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
SQLITE_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_sqlite_prompt + PROMPT_SUFFIX,
)
_clickhouse_prompt = """You are a ClickHouse expert. Given an input question, first create a syntactically correct Clic query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per ClickHouse. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use today() function to get the current date, if the question involves "today".
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
"""
CLICKHOUSE_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_clickhouse_prompt + PROMPT_SUFFIX,
)
_prestodb_prompt = """You are a PrestoDB expert. Given an input question, first create a syntactically correct PrestoDB query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per PrestoDB. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use current_date function to get the current date, if the question involves "today".
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
"""
PRESTODB_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_prestodb_prompt + PROMPT_SUFFIX,
)
SQL_PROMPTS = {
"crate": CRATEDB_PROMPT,
"duckdb": DUCKDB_PROMPT,
"googlesql": GOOGLESQL_PROMPT,
"mssql": MSSQL_PROMPT,
"mysql": MYSQL_PROMPT,
"mariadb": MARIADB_PROMPT,
"oracle": ORACLE_PROMPT,
"postgresql": POSTGRES_PROMPT,
"sqlite": SQLITE_PROMPT,
"clickhouse": CLICKHOUSE_PROMPT,
"prestodb": PRESTODB_PROMPT,
}
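# Illustrative usage sketch: ``SQL_PROMPTS`` maps a dialect string (e.g. the
# value of ``SQLDatabase.dialect``) to the matching prompt; unknown dialects
# can fall back to the generic ``PROMPT``. The table schema and question below
# are made up.
_dialect = "postgresql"
_sql_prompt = SQL_PROMPTS.get(_dialect, PROMPT)
_rendered_sql_prompt = _sql_prompt.format(
    input="How many users signed up last week?",
    table_info='CREATE TABLE "users" ("id" INTEGER, "signup_date" DATE)',
    top_k=5,
)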
155307
import json
from typing import Any, Callable, Dict, Literal, Optional, Sequence, Type, Union
from langchain_core._api import deprecated
from langchain_core.output_parsers import (
BaseGenerationOutputParser,
BaseOutputParser,
JsonOutputParser,
PydanticOutputParser,
)
from langchain_core.output_parsers.openai_functions import (
JsonOutputFunctionsParser,
PydanticAttrOutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
)
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.utils.function_calling import (
convert_to_openai_function,
convert_to_openai_tool,
)
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel
@deprecated(
since="0.1.14",
message=(
"LangChain has introduced a method called `with_structured_output` that "
"is available on ChatModels capable of tool calling. "
"You can read more about the method here: "
"<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>. "
"Please follow our extraction use case documentation for more guidelines "
"on how to do information extraction with LLMs. "
"<https://python.langchain.com/docs/use_cases/extraction/>. "
"If you notice other issues, please provide "
"feedback here: "
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
alternative=(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please refer to the documentation of structured_output
# to see an up-to-date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
),
)
def create_openai_fn_runnable(
functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
llm: Runnable,
prompt: Optional[BasePromptTemplate] = None,
*,
enforce_single_function_usage: bool = True,
output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]] = None,
**llm_kwargs: Any,
) -> Runnable:
"""Create a runnable sequence that uses OpenAI functions.
Args:
functions: A sequence of either dictionaries, pydantic.BaseModel classes, or
Python functions. If dictionaries are passed in, they are assumed to
already be valid OpenAI functions. If only a single
function is passed in, then it will be enforced that the model use that
function. pydantic.BaseModels and Python functions should have docstrings
describing what the function does. For best results, pydantic.BaseModels
should have descriptions of the parameters and Python functions should have
Google Python style args descriptions in the docstring. Additionally,
Python functions should only use primitive types (str, int, float, bool) or
pydantic.BaseModels for arguments.
llm: Language model to use, assumed to support the OpenAI function-calling API.
prompt: BasePromptTemplate to pass to the model.
enforce_single_function_usage: only used if a single function is passed in. If
True, then the model will be forced to use the given function. If False,
then the model will be given the option to use the given function or not.
output_parser: BaseLLMOutputParser to use for parsing model outputs. By default
will be inferred from the function types. If pydantic.BaseModels are passed
in, then the OutputParser will try to parse outputs using those. Otherwise
model outputs will simply be parsed as JSON. If multiple functions are
passed in and they are not pydantic.BaseModels, the chain output will
include both the name of the function that was returned and the arguments
to pass to the function.
**llm_kwargs: Additional named arguments to pass to the language model.
Returns:
A runnable sequence that will pass in the given functions to the model when run.
Example:
.. code-block:: python
from typing import Optional
from langchain.chains.structured_output import create_openai_fn_runnable
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
class RecordPerson(BaseModel):
'''Record some identifying information about a person.'''
name: str = Field(..., description="The person's name")
age: int = Field(..., description="The person's age")
fav_food: Optional[str] = Field(None, description="The person's favorite food")
class RecordDog(BaseModel):
'''Record some identifying information about a dog.'''
name: str = Field(..., description="The dog's name")
color: str = Field(..., description="The dog's color")
fav_food: Optional[str] = Field(None, description="The dog's favorite food")
llm = ChatOpenAI(model="gpt-4", temperature=0)
structured_llm = create_openai_fn_runnable([RecordPerson, RecordDog], llm)
structured_llm.invoke("Harry was a chubby brown beagle who loved chicken)
# -> RecordDog(name="Harry", color="brown", fav_food="chicken")
""" # noqa: E501
if not functions:
raise ValueError("Need to pass in at least one function. Received zero.")
openai_functions = [convert_to_openai_function(f) for f in functions]
llm_kwargs_: Dict[str, Any] = {"functions": openai_functions, **llm_kwargs}
if len(openai_functions) == 1 and enforce_single_function_usage:
llm_kwargs_["function_call"] = {"name": openai_functions[0]["name"]}
output_parser = output_parser or get_openai_output_parser(functions)
if prompt:
return prompt | llm.bind(**llm_kwargs_) | output_parser
else:
return llm.bind(**llm_kwargs_) | output_parser
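# Illustrative usage sketch (see the deprecation note above). Unlike the
# docstring example, this shows the ``prompt`` branch, where the returned
# runnable is ``prompt | llm.bind(functions=...) | output_parser``.
# ``RecordDog`` refers to the schema defined in the docstring example; the
# imports and model name are assumptions.
#
#     from langchain_core.prompts import ChatPromptTemplate
#     from langchain_openai import ChatOpenAI
#
#     prompt = ChatPromptTemplate.from_messages(
#         [("system", "Record entities mentioned in the text."), ("human", "{input}")]
#     )
#     llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
#     runnable = create_openai_fn_runnable([RecordDog], llm, prompt)
#     runnable.invoke({"input": "Harry was a chubby brown beagle who loved chicken."})
#     # -> RecordDog(name="Harry", color="brown", fav_food="chicken")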
@deprecated(
since="0.1.17",
message=(
"LangChain has introduced a method called `with_structured_output` that "
"is available on ChatModels capable of tool calling. "
"You can read more about the method here: "
"<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>."
"Please follow our extraction use case documentation for more guidelines "
"on how to do information extraction with LLMs. "
"<https://python.langchain.com/docs/use_cases/extraction/>. "
"If you notice other issues, please provide "
"feedback here: "
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
alternative=(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please refer to the documentation of structured_output
# to see an up-to-date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
),
)
def create_structured_output_runnable(
output_schema: Union[Dict[str, Any], Type[BaseModel]],
llm: Runnable,
prompt: Optional[BasePromptTemplate] = None,
*,
output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]] = None,
enforce_function_usage: bool = True,
return_single: bool = True,
mode: Literal[
"openai-functions", "openai-tools", "openai-json"
] = "openai-functions",
**kwargs: Any,
) -> Runnable:
155311
# flake8: noqa
from langchain_core.prompts import PromptTemplate
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
155312
"""Chain for question-answering against a vector database."""
from __future__ import annotations
import inspect
import warnings
from abc import abstractmethod
from typing import Any, Dict, List, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain_core.vectorstores import VectorStore
from pydantic import ConfigDict, Field, model_validator
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Use the `create_retrieval_chain` constructor "
"instead. See migration guide here: "
"https://python.langchain.com/docs/versions/migrating_chains/retrieval_qa/"
),
)
class BaseRetrievalQA(Chain):
"""Base class for question-answering chains."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine the documents."""
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_source_documents: bool = False
"""Return the source documents or not."""
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> List[str]:
"""Input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Output keys.
:meta private:
"""
_output_keys = [self.output_key]
if self.return_source_documents:
_output_keys = _output_keys + ["source_documents"]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
callbacks: Callbacks = None,
llm_chain_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> BaseRetrievalQA:
"""Initialize from LLM."""
_prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
llm_chain = LLMChain(
llm=llm, prompt=_prompt, callbacks=callbacks, **(llm_chain_kwargs or {})
)
document_prompt = PromptTemplate(
input_variables=["page_content"], template="Context:\n{page_content}"
)
combine_documents_chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_variable_name="context",
document_prompt=document_prompt,
callbacks=callbacks,
)
return cls(
combine_documents_chain=combine_documents_chain,
callbacks=callbacks,
**kwargs,
)
@classmethod
def from_chain_type(
cls,
llm: BaseLanguageModel,
chain_type: str = "stuff",
chain_type_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> BaseRetrievalQA:
"""Load chain from chain type."""
_chain_type_kwargs = chain_type_kwargs or {}
combine_documents_chain = load_qa_chain(
llm, chain_type=chain_type, **_chain_type_kwargs
)
return cls(combine_documents_chain=combine_documents_chain, **kwargs)
@abstractmethod
def _get_docs(
self,
question: str,
*,
run_manager: CallbackManagerForChainRun,
) -> List[Document]:
"""Get documents to do question answering over."""
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run get_relevant_text and llm on input query.
If chain has 'return_source_documents' as 'True', returns
the retrieved documents as well under the key 'source_documents'.
Example:
.. code-block:: python
res = indexqa({'query': 'This is my query'})
answer, docs = res['result'], res['source_documents']
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
accepts_run_manager = (
"run_manager" in inspect.signature(self._get_docs).parameters
)
if accepts_run_manager:
docs = self._get_docs(question, run_manager=_run_manager)
else:
docs = self._get_docs(question) # type: ignore[call-arg]
answer = self.combine_documents_chain.run(
input_documents=docs, question=question, callbacks=_run_manager.get_child()
)
if self.return_source_documents:
return {self.output_key: answer, "source_documents": docs}
else:
return {self.output_key: answer}
@abstractmethod
async def _aget_docs(
self,
question: str,
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> List[Document]:
"""Get documents to do question answering over."""
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run get_relevant_text and llm on input query.
If chain has 'return_source_documents' as 'True', returns
the retrieved documents as well under the key 'source_documents'.
Example:
.. code-block:: python
res = indexqa({'query': 'This is my query'})
answer, docs = res['result'], res['source_documents']
"""
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
accepts_run_manager = (
"run_manager" in inspect.signature(self._aget_docs).parameters
)
if accepts_run_manager:
docs = await self._aget_docs(question, run_manager=_run_manager)
else:
docs = await self._aget_docs(question) # type: ignore[call-arg]
answer = await self.combine_documents_chain.arun(
input_documents=docs, question=question, callbacks=_run_manager.get_child()
)
if self.return_source_documents:
return {self.output_key: answer, "source_documents": docs}
else:
return {self.output_key: answer}
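# Illustrative usage sketch (the class above is deprecated in favour of
# ``create_retrieval_chain``). ``RetrievalQA`` is the concrete subclass of
# ``BaseRetrievalQA`` defined elsewhere in this module; the vector store,
# embeddings and chat model below are assumptions.
#
#     from langchain_community.vectorstores import FAISS
#     from langchain_openai import ChatOpenAI, OpenAIEmbeddings
#
#     vectorstore = FAISS.from_texts(
#         ["Dune was first published in 1965."], OpenAIEmbeddings()
#     )
#     qa = RetrievalQA.from_chain_type(
#         llm=ChatOpenAI(model="gpt-4o-mini"),
#         chain_type="stuff",
#         retriever=vectorstore.as_retriever(),
#         return_source_documents=True,
#     )
#     result = qa.invoke({"query": "When was Dune published?"})
#     # result["result"] holds the answer; result["source_documents"] the docs.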
155314
"""Load question answering with sources chains."""
from __future__ import annotations
from typing import Any, Mapping, Optional, Protocol
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.reduce import ReduceDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_with_sources import (
map_reduce_prompt,
refine_prompts,
stuff_prompt,
)
from langchain.chains.question_answering.map_rerank_prompt import (
PROMPT as MAP_RERANK_PROMPT,
)
class LoadingCallable(Protocol):
"""Interface for loading the combine documents chain."""
def __call__(
self, llm: BaseLanguageModel, **kwargs: Any
) -> BaseCombineDocumentsChain:
"""Callable to load the combine documents chain."""
def _load_map_rerank_chain(
llm: BaseLanguageModel,
prompt: BasePromptTemplate = MAP_RERANK_PROMPT,
verbose: bool = False,
document_variable_name: str = "context",
rank_key: str = "score",
answer_key: str = "answer",
**kwargs: Any,
) -> MapRerankDocumentsChain:
llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
return MapRerankDocumentsChain(
llm_chain=llm_chain,
rank_key=rank_key,
answer_key=answer_key,
document_variable_name=document_variable_name,
**kwargs,
)
def _load_stuff_chain(
llm: BaseLanguageModel,
prompt: BasePromptTemplate = stuff_prompt.PROMPT,
document_prompt: BasePromptTemplate = stuff_prompt.EXAMPLE_PROMPT,
document_variable_name: str = "summaries",
verbose: Optional[bool] = None,
**kwargs: Any,
) -> StuffDocumentsChain:
llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose) # type: ignore[arg-type]
return StuffDocumentsChain(
llm_chain=llm_chain,
document_variable_name=document_variable_name,
document_prompt=document_prompt,
verbose=verbose, # type: ignore[arg-type]
**kwargs,
)
def _load_map_reduce_chain(
llm: BaseLanguageModel,
question_prompt: BasePromptTemplate = map_reduce_prompt.QUESTION_PROMPT,
combine_prompt: BasePromptTemplate = map_reduce_prompt.COMBINE_PROMPT,
document_prompt: BasePromptTemplate = map_reduce_prompt.EXAMPLE_PROMPT,
combine_document_variable_name: str = "summaries",
map_reduce_document_variable_name: str = "context",
collapse_prompt: Optional[BasePromptTemplate] = None,
reduce_llm: Optional[BaseLanguageModel] = None,
collapse_llm: Optional[BaseLanguageModel] = None,
verbose: Optional[bool] = None,
token_max: int = 3000,
**kwargs: Any,
) -> MapReduceDocumentsChain:
map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose) # type: ignore[arg-type]
_reduce_llm = reduce_llm or llm
reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose=verbose) # type: ignore[arg-type]
combine_documents_chain = StuffDocumentsChain(
llm_chain=reduce_chain,
document_variable_name=combine_document_variable_name,
document_prompt=document_prompt,
verbose=verbose, # type: ignore[arg-type]
)
if collapse_prompt is None:
collapse_chain = None
if collapse_llm is not None:
raise ValueError(
"collapse_llm provided, but collapse_prompt was not: please "
"provide one or stop providing collapse_llm."
)
else:
_collapse_llm = collapse_llm or llm
collapse_chain = StuffDocumentsChain(
llm_chain=LLMChain(
llm=_collapse_llm,
prompt=collapse_prompt,
verbose=verbose, # type: ignore[arg-type]
),
document_variable_name=combine_document_variable_name,
document_prompt=document_prompt,
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
collapse_documents_chain=collapse_chain,
token_max=token_max,
verbose=verbose, # type: ignore[arg-type]
)
return MapReduceDocumentsChain(
llm_chain=map_chain,
reduce_documents_chain=reduce_documents_chain,
document_variable_name=map_reduce_document_variable_name,
verbose=verbose, # type: ignore[arg-type]
**kwargs,
)
def _load_refine_chain(
llm: BaseLanguageModel,
question_prompt: BasePromptTemplate = refine_prompts.DEFAULT_TEXT_QA_PROMPT,
refine_prompt: BasePromptTemplate = refine_prompts.DEFAULT_REFINE_PROMPT,
document_prompt: BasePromptTemplate = refine_prompts.EXAMPLE_PROMPT,
document_variable_name: str = "context_str",
initial_response_name: str = "existing_answer",
refine_llm: Optional[BaseLanguageModel] = None,
verbose: Optional[bool] = None,
**kwargs: Any,
) -> RefineDocumentsChain:
initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose) # type: ignore[arg-type]
_refine_llm = refine_llm or llm
refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose) # type: ignore[arg-type]
return RefineDocumentsChain(
initial_llm_chain=initial_chain,
refine_llm_chain=refine_chain,
document_variable_name=document_variable_name,
initial_response_name=initial_response_name,
document_prompt=document_prompt,
verbose=verbose, # type: ignore[arg-type]
**kwargs,
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/"
"\nSee also the following migration guides for replacements "
"based on `chain_type`:\n"
"stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n" # noqa: E501
"map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n" # noqa: E501
"refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n" # noqa: E501
"map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n" # noqa: E501
),
)
def load_qa_with_sources_chain(
llm: BaseLanguageModel,
chain_type: str = "stuff",
verbose: Optional[bool] = None,
**kwargs: Any,
) -> BaseCombineDocumentsChain:
"""Load a question answering with sources chain.
Args:
llm: Language Model to use in the chain.
chain_type: Type of document combining chain to use. Should be one of "stuff",
"map_reduce", "refine" and "map_rerank".
verbose: Whether chains should be run in verbose mode or not. Note that this
applies to all chains that make up the final chain.
Returns:
A chain to use for question answering with sources.
"""
loader_mapping: Mapping[str, LoadingCallable] = {
"stuff": _load_stuff_chain,
"map_reduce": _load_map_reduce_chain,
"refine": _load_refine_chain,
"map_rerank": _load_map_rerank_chain,
}
if chain_type not in loader_mapping:
raise ValueError(
f"Got unsupported chain type: {chain_type}. "
f"Should be one of {loader_mapping.keys()}"
)
_func: LoadingCallable = loader_mapping[chain_type]
return _func(llm, verbose=verbose, **kwargs)
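# A minimal usage sketch for the deprecated loader above, assuming
# `langchain_openai` is installed and an OpenAI API key is configured; the
# model choice and the sample document are placeholders, not part of this module.
if __name__ == "__main__":
    from langchain_core.documents import Document
    from langchain_openai import ChatOpenAI  # assumed to be available

    sample_docs = [
        Document(
            page_content="LangChain provides composable chains for LLM apps.",
            metadata={"source": "notes.txt"},  # the default prompts expect a "source"
        )
    ]
    qa_chain = load_qa_with_sources_chain(ChatOpenAI(), chain_type="stuff")
    output = qa_chain.invoke(
        {"input_documents": sample_docs, "question": "What does LangChain provide?"}
    )
    # The combined answer (with a trailing SOURCES section) is under "output_text".
    print(output["output_text"])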
| |
155317
|
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any, Dict, List
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over a vector database."""
vectorstore: VectorStore = Field(exclude=True)
"""Vector Database to connect to."""
k: int = 4
"""Number of results to return from store"""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
    enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true."""
search_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain, StuffDocumentsChain
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self, inputs: Dict[str, Any], *, run_manager: CallbackManagerForChainRun
) -> List[Document]:
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(
question, k=self.k, **self.search_kwargs
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self, inputs: Dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun
) -> List[Document]:
raise NotImplementedError("VectorDBQAWithSourcesChain does not support async")
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: Dict) -> Any:
warnings.warn(
"`VectorDBQAWithSourcesChain` is deprecated - "
"please use `from langchain.chains import RetrievalQAWithSourcesChain`"
)
return values
@property
def _chain_type(self) -> str:
return "vector_db_qa_with_sources_chain"
| |
155318
|
"""Question-answering with sources over an index."""
from typing import Any, Dict, List
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over an index."""
retriever: BaseRetriever = Field(exclude=True)
"""Index to connect to."""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
    enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true."""
def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain, StuffDocumentsChain
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self, inputs: Dict[str, Any], *, run_manager: CallbackManagerForChainRun
) -> List[Document]:
question = inputs[self.question_key]
docs = self.retriever.invoke(
question, config={"callbacks": run_manager.get_child()}
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self, inputs: Dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun
) -> List[Document]:
question = inputs[self.question_key]
docs = await self.retriever.ainvoke(
question, config={"callbacks": run_manager.get_child()}
)
return self._reduce_tokens_below_limit(docs)
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "retrieval_qa_with_sources_chain"
| |
155320
|
"""Question answering with sources over documents."""
from __future__ import annotations
import inspect
import re
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from pydantic import ConfigDict, model_validator
from langchain.chains import ReduceDocumentsChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langchain.chains.qa_with_sources.map_reduce_prompt import (
COMBINE_PROMPT,
EXAMPLE_PROMPT,
QUESTION_PROMPT,
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/"
),
)
class BaseQAWithSourcesChain(Chain, ABC):
"""Question answering chain with sources over documents."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine documents."""
question_key: str = "question" #: :meta private:
input_docs_key: str = "docs" #: :meta private:
answer_key: str = "answer" #: :meta private:
sources_answer_key: str = "sources" #: :meta private:
return_source_documents: bool = False
"""Return the source documents."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
document_prompt: BasePromptTemplate = EXAMPLE_PROMPT,
question_prompt: BasePromptTemplate = QUESTION_PROMPT,
combine_prompt: BasePromptTemplate = COMBINE_PROMPT,
**kwargs: Any,
) -> BaseQAWithSourcesChain:
"""Construct the chain from an LLM."""
llm_question_chain = LLMChain(llm=llm, prompt=question_prompt)
llm_combine_chain = LLMChain(llm=llm, prompt=combine_prompt)
combine_results_chain = StuffDocumentsChain(
llm_chain=llm_combine_chain,
document_prompt=document_prompt,
document_variable_name="summaries",
)
reduce_documents_chain = ReduceDocumentsChain( # type: ignore[misc]
combine_documents_chain=combine_results_chain
)
combine_documents_chain = MapReduceDocumentsChain(
llm_chain=llm_question_chain,
reduce_documents_chain=reduce_documents_chain,
document_variable_name="context",
)
return cls(
combine_documents_chain=combine_documents_chain,
**kwargs,
)
@classmethod
def from_chain_type(
cls,
llm: BaseLanguageModel,
chain_type: str = "stuff",
chain_type_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> BaseQAWithSourcesChain:
"""Load chain from chain type."""
_chain_kwargs = chain_type_kwargs or {}
combine_documents_chain = load_qa_with_sources_chain(
llm, chain_type=chain_type, **_chain_kwargs
)
return cls(combine_documents_chain=combine_documents_chain, **kwargs)
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.question_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
_output_keys = [self.answer_key, self.sources_answer_key]
if self.return_source_documents:
_output_keys = _output_keys + ["source_documents"]
return _output_keys
@model_validator(mode="before")
@classmethod
def validate_naming(cls, values: Dict) -> Any:
"""Fix backwards compatibility in naming."""
if "combine_document_chain" in values:
values["combine_documents_chain"] = values.pop("combine_document_chain")
return values
def _split_sources(self, answer: str) -> Tuple[str, str]:
"""Split sources from answer."""
if re.search(r"SOURCES?:", answer, re.IGNORECASE):
answer, sources = re.split(
r"SOURCES?:|QUESTION:\s", answer, flags=re.IGNORECASE
)[:2]
sources = re.split(r"\n", sources)[0].strip()
else:
sources = ""
return answer, sources
@abstractmethod
def _get_docs(
self,
inputs: Dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> List[Document]:
"""Get docs to run questioning over."""
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
accepts_run_manager = (
"run_manager" in inspect.signature(self._get_docs).parameters
)
if accepts_run_manager:
docs = self._get_docs(inputs, run_manager=_run_manager)
else:
docs = self._get_docs(inputs) # type: ignore[call-arg]
answer = self.combine_documents_chain.run(
input_documents=docs, callbacks=_run_manager.get_child(), **inputs
)
answer, sources = self._split_sources(answer)
result: Dict[str, Any] = {
self.answer_key: answer,
self.sources_answer_key: sources,
}
if self.return_source_documents:
result["source_documents"] = docs
return result
@abstractmethod
async def _aget_docs(
self,
inputs: Dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> List[Document]:
"""Get docs to run questioning over."""
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
accepts_run_manager = (
"run_manager" in inspect.signature(self._aget_docs).parameters
)
if accepts_run_manager:
docs = await self._aget_docs(inputs, run_manager=_run_manager)
else:
docs = await self._aget_docs(inputs) # type: ignore[call-arg]
answer = await self.combine_documents_chain.arun(
input_documents=docs, callbacks=_run_manager.get_child(), **inputs
)
answer, sources = self._split_sources(answer)
result: Dict[str, Any] = {
self.answer_key: answer,
self.sources_answer_key: sources,
}
if self.return_source_documents:
result["source_documents"] = docs
return result
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/"
),
)
class QAWithSourcesChain(BaseQAWithSourcesChain):
"""Question answering with sources over documents."""
input_docs_key: str = "docs" #: :meta private:
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_docs_key, self.question_key]
def _get_docs(
self,
inputs: Dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> List[Document]:
"""Get docs to run questioning over."""
return inputs.pop(self.input_docs_key)
async def _aget_docs(
self,
inputs: Dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> List[Document]:
"""Get docs to run questioning over."""
return inputs.pop(self.input_docs_key)
@property
def _chain_type(self) -> str:
return "qa_with_sources_chain"
| |
155337
|
# flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.prompt import PromptTemplate
templ1 = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.
When coming up with this question/answer pair, you must respond in the following format:
```
{{
"question": "$YOUR_QUESTION_HERE",
"answer": "$THE_ANSWER_HERE"
}}
```
Everything between the ``` must be valid json.
"""
templ2 = """Please come up with a question/answer pair, in the specified JSON format, for the following text:
----------------
{text}"""
CHAT_PROMPT = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(templ1),
HumanMessagePromptTemplate.from_template(templ2),
]
)
templ = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.
When coming up with this question/answer pair, you must respond in the following format:
```
{{
"question": "$YOUR_QUESTION_HERE",
"answer": "$THE_ANSWER_HERE"
}}
```
Everything between the ``` must be valid json.
Please come up with a question/answer pair, in the specified JSON format, for the following text:
----------------
{text}"""
PROMPT = PromptTemplate.from_template(templ)
PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)]
)
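# A small runnable sketch of how the selector picks a prompt: chat models get
# CHAT_PROMPT, completion-style LLMs get PROMPT. The fake models below are
# stand-ins from langchain_core used only for illustration.
if __name__ == "__main__":
    from langchain_core.language_models import FakeListChatModel, FakeListLLM

    chat_prompt = PROMPT_SELECTOR.get_prompt(FakeListChatModel(responses=["{}"]))
    completion_prompt = PROMPT_SELECTOR.get_prompt(FakeListLLM(responses=["{}"]))
    print(type(chat_prompt).__name__)        # ChatPromptTemplate
    print(type(completion_prompt).__name__)  # PromptTemplate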
| |
155378
|
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Use the `create_stuff_documents_chain` constructor "
"instead. See migration guide here: "
"https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain/" # noqa: E501
),
)
class StuffDocumentsChain(BaseCombineDocumentsChain):
"""Chain that combines documents by stuffing into context.
This chain takes a list of documents and first combines them into a single string.
It does this by formatting each document into a string with the `document_prompt`
and then joining them together with `document_separator`. It then adds that new
string to the inputs with the variable name set by `document_variable_name`.
Those inputs are then passed to the `llm_chain`.
Example:
.. code-block:: python
from langchain.chains import StuffDocumentsChain, LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_community.llms import OpenAI
# This controls how each document will be formatted. Specifically,
# it will be passed to `format_document` - see that function for more
# details.
document_prompt = PromptTemplate(
input_variables=["page_content"],
template="{page_content}"
)
document_variable_name = "context"
llm = OpenAI()
# The prompt here should take as an input variable the
# `document_variable_name`
prompt = PromptTemplate.from_template(
"Summarize this content: {context}"
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name
)
"""
llm_chain: LLMChain
"""LLM chain which is called with the formatted document string,
along with any other inputs."""
document_prompt: BasePromptTemplate = Field(
default_factory=lambda: DEFAULT_DOCUMENT_PROMPT
)
"""Prompt to use to format each document, gets passed to `format_document`."""
document_variable_name: str
"""The variable name in the llm_chain to put the documents in.
    If only one variable is present in the llm_chain, this need not be provided."""
document_separator: str = "\n\n"
"""The string with which to join the formatted documents"""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def get_default_document_variable_name(cls, values: Dict) -> Any:
"""Get default document variable name, if not provided.
If only one variable is present in the llm_chain.prompt,
we can infer that the formatted documents should be passed in
with this variable name.
"""
llm_chain_variables = values["llm_chain"].prompt.input_variables
if "document_variable_name" not in values:
if len(llm_chain_variables) == 1:
values["document_variable_name"] = llm_chain_variables[0]
else:
raise ValueError(
"document_variable_name must be provided if there are "
"multiple llm_chain_variables"
)
else:
if values["document_variable_name"] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_variable_name']} was "
f"not found in llm_chain input_variables: {llm_chain_variables}"
)
return values
@property
def input_keys(self) -> List[str]:
extra_keys = [
k for k in self.llm_chain.input_keys if k != self.document_variable_name
]
return super().input_keys + extra_keys
def _get_inputs(self, docs: List[Document], **kwargs: Any) -> dict:
"""Construct inputs from kwargs and docs.
Format and then join all the documents together into one input with name
`self.document_variable_name`. Also pluck any additional variables
from **kwargs.
Args:
docs: List of documents to format and then join into single input
**kwargs: additional inputs to chain, will pluck any other required
arguments from here.
Returns:
dictionary of inputs to LLMChain
"""
# Format each document according to the prompt
doc_strings = [format_document(doc, self.document_prompt) for doc in docs]
# Join the documents together to put them in the prompt.
inputs = {
k: v
for k, v in kwargs.items()
if k in self.llm_chain.prompt.input_variables
}
inputs[self.document_variable_name] = self.document_separator.join(doc_strings)
return inputs
def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]:
"""Return the prompt length given the documents passed in.
This can be used by a caller to determine whether passing in a list
        of documents would exceed a certain prompt length. This is useful when
trying to ensure that the size of a prompt remains below a certain
context limit.
Args:
docs: List[Document], a list of documents to use to calculate the
total prompt length.
Returns:
Returns None if the method does not depend on the prompt length,
otherwise the length of the prompt in tokens.
"""
inputs = self._get_inputs(docs, **kwargs)
prompt = self.llm_chain.prompt.format(**inputs)
return self.llm_chain._get_num_tokens(prompt)
def combine_docs(
self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
) -> Tuple[str, dict]:
"""Stuff all documents into one prompt and pass to LLM.
Args:
docs: List of documents to join together into one variable
callbacks: Optional callbacks to pass along
**kwargs: additional parameters to use to get inputs to LLMChain.
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
inputs = self._get_inputs(docs, **kwargs)
# Call predict on the LLM.
return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
async def acombine_docs(
self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
) -> Tuple[str, dict]:
"""Async stuff all documents into one prompt and pass to LLM.
Args:
docs: List of documents to join together into one variable
callbacks: Optional callbacks to pass along
**kwargs: additional parameters to use to get inputs to LLMChain.
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
inputs = self._get_inputs(docs, **kwargs)
# Call predict on the LLM.
return await self.llm_chain.apredict(callbacks=callbacks, **inputs), {}
@property
def _chain_type(self) -> str:
return "stuff_documents_chain"
| |
155385
|
async def acombine_docs(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[str, dict]:
"""Combine documents in a map reduce manner.
Combine by mapping first chain over all documents, then reducing the results.
This reducing can be done recursively if needed (if there are many documents).
"""
map_results = await self.llm_chain.aapply(
# FYI - this is parallelized and so it is fast.
[{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs],
callbacks=callbacks,
)
question_result_key = self.llm_chain.output_key
result_docs = [
Document(page_content=r[question_result_key], metadata=docs[i].metadata)
# This uses metadata from the docs, and the textual results from `results`
for i, r in enumerate(map_results)
]
result, extra_return_dict = await self.reduce_documents_chain.acombine_docs(
result_docs, token_max=token_max, callbacks=callbacks, **kwargs
)
if self.return_intermediate_steps:
intermediate_steps = [r[question_result_key] for r in map_results]
extra_return_dict["intermediate_steps"] = intermediate_steps
return result, extra_return_dict
@property
def _chain_type(self) -> str:
return "map_reduce_documents_chain"
| |
155397
|
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:"""
QA_PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
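# A small runnable sketch: both templates are ordinary PromptTemplates and can
# be formatted directly. The chat history and question below are made-up values
# for illustration only.
if __name__ == "__main__":
    standalone = CONDENSE_QUESTION_PROMPT.format(
        chat_history="Human: Tell me about LangChain.\nAssistant: It builds LLM apps.",
        question="Which chains does it include?",
    )
    grounded = QA_PROMPT.format(
        context="LangChain includes retrieval and summarization chains.",
        question="Which chains does it include?",
    )
    print(standalone)
    print(grounded)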
| |
155398
|
"""Chain for chatting with a vector database."""
from __future__ import annotations
import inspect
import warnings
from abc import abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts import BasePromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import RunnableConfig
from langchain_core.vectorstores import VectorStore
from pydantic import BaseModel, ConfigDict, Field, model_validator
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
# Depending on the memory type and configuration, the chat history format may differ.
# This needs to be consolidated.
CHAT_TURN_TYPE = Union[Tuple[str, str], BaseMessage]
_ROLE_MAP = {"human": "Human: ", "ai": "Assistant: "}
def _get_chat_history(chat_history: List[CHAT_TURN_TYPE]) -> str:
buffer = ""
for dialogue_turn in chat_history:
if isinstance(dialogue_turn, BaseMessage):
if len(dialogue_turn.content) > 0:
role_prefix = _ROLE_MAP.get(
dialogue_turn.type, f"{dialogue_turn.type}: "
)
buffer += f"\n{role_prefix}{dialogue_turn.content}"
elif isinstance(dialogue_turn, tuple):
human = "Human: " + dialogue_turn[0]
ai = "Assistant: " + dialogue_turn[1]
buffer += "\n" + "\n".join([human, ai])
else:
raise ValueError(
f"Unsupported chat history format: {type(dialogue_turn)}."
f" Full chat history: {chat_history} "
)
return buffer
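# For example, a (human, ai) tuple such as
#     _get_chat_history([("Hi there", "Hello, how can I help?")])
# yields "\nHuman: Hi there\nAssistant: Hello, how can I help?".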
class InputType(BaseModel):
"""Input type for ConversationalRetrievalChain."""
question: str
"""The question to answer."""
chat_history: List[CHAT_TURN_TYPE] = Field(default_factory=list)
"""The chat history to use for retrieval."""
class BaseConversationalRetrievalChain(Chain):
"""Chain for chatting with an index."""
combine_docs_chain: BaseCombineDocumentsChain
"""The chain used to combine any retrieved documents."""
question_generator: LLMChain
"""The chain used to generate a new question for the sake of retrieval.
This chain will take in the current question (with variable `question`)
and any chat history (with variable `chat_history`) and will produce
a new standalone question to be used later on."""
output_key: str = "answer"
"""The output key to return the final answer of this chain in."""
rephrase_question: bool = True
"""Whether or not to pass the new generated question to the combine_docs_chain.
If True, will pass the new generated question along.
If False, will only use the new generated question for retrieval and pass the
original question along to the combine_docs_chain."""
return_source_documents: bool = False
"""Return the retrieved source documents as part of the final result."""
return_generated_question: bool = False
"""Return the generated question as part of the final result."""
get_chat_history: Optional[Callable[[List[CHAT_TURN_TYPE]], str]] = None
"""An optional function to get a string of the chat history.
If None is provided, will use a default."""
response_if_no_docs_found: Optional[str] = None
"""If specified, the chain will return a fixed response if no docs
    are found for the question."""
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> List[str]:
"""Input keys."""
return ["question", "chat_history"]
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
return InputType
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
if self.return_source_documents:
_output_keys = _output_keys + ["source_documents"]
if self.return_generated_question:
_output_keys = _output_keys + ["generated_question"]
return _output_keys
@abstractmethod
def _get_docs(
self,
question: str,
inputs: Dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs["question"]
get_chat_history = self.get_chat_history or _get_chat_history
chat_history_str = get_chat_history(inputs["chat_history"])
if chat_history_str:
callbacks = _run_manager.get_child()
new_question = self.question_generator.run(
question=question, chat_history=chat_history_str, callbacks=callbacks
)
else:
new_question = question
accepts_run_manager = (
"run_manager" in inspect.signature(self._get_docs).parameters
)
if accepts_run_manager:
docs = self._get_docs(new_question, inputs, run_manager=_run_manager)
else:
docs = self._get_docs(new_question, inputs) # type: ignore[call-arg]
output: Dict[str, Any] = {}
if self.response_if_no_docs_found is not None and len(docs) == 0:
output[self.output_key] = self.response_if_no_docs_found
else:
new_inputs = inputs.copy()
if self.rephrase_question:
new_inputs["question"] = new_question
new_inputs["chat_history"] = chat_history_str
answer = self.combine_docs_chain.run(
input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
)
output[self.output_key] = answer
if self.return_source_documents:
output["source_documents"] = docs
if self.return_generated_question:
output["generated_question"] = new_question
return output
@abstractmethod
async def _aget_docs(
self,
question: str,
inputs: Dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
question = inputs["question"]
get_chat_history = self.get_chat_history or _get_chat_history
chat_history_str = get_chat_history(inputs["chat_history"])
if chat_history_str:
callbacks = _run_manager.get_child()
new_question = await self.question_generator.arun(
question=question, chat_history=chat_history_str, callbacks=callbacks
)
else:
new_question = question
accepts_run_manager = (
"run_manager" in inspect.signature(self._aget_docs).parameters
)
if accepts_run_manager:
docs = await self._aget_docs(new_question, inputs, run_manager=_run_manager)
else:
docs = await self._aget_docs(new_question, inputs) # type: ignore[call-arg]
output: Dict[str, Any] = {}
if self.response_if_no_docs_found is not None and len(docs) == 0:
output[self.output_key] = self.response_if_no_docs_found
else:
new_inputs = inputs.copy()
if self.rephrase_question:
new_inputs["question"] = new_question
new_inputs["chat_history"] = chat_history_str
answer = await self.combine_docs_chain.arun(
input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
)
output[self.output_key] = answer
if self.return_source_documents:
output["source_documents"] = docs
if self.return_generated_question:
output["generated_question"] = new_question
return output
def save(self, file_path: Union[Path, str]) -> None:
if self.get_chat_history:
raise ValueError("Chain not saveable when `get_chat_history` is not None.")
super().save(file_path)
| |
155399
|
@deprecated(
since="0.1.17",
alternative=(
"create_history_aware_retriever together with create_retrieval_chain "
"(see example in docstring)"
),
removal="1.0",
)
class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
"""Chain for having a conversation based on retrieved documents.
This class is deprecated. See below for an example implementation using
`create_retrieval_chain`. Additional walkthroughs can be found at
https://python.langchain.com/docs/use_cases/question_answering/chat_history
.. code-block:: python
from langchain.chains import (
create_history_aware_retriever,
create_retrieval_chain,
)
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
retriever = ... # Your retriever
llm = ChatOpenAI()
# Contextualize question
contextualize_q_system_prompt = (
"Given a chat history and the latest user question "
"which might reference context in the chat history, "
"formulate a standalone question which can be understood "
"without the chat history. Do NOT answer the question, just "
"reformulate it if needed and otherwise return it as is."
)
contextualize_q_prompt = ChatPromptTemplate.from_messages(
[
("system", contextualize_q_system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}"),
]
)
history_aware_retriever = create_history_aware_retriever(
llm, retriever, contextualize_q_prompt
)
# Answer question
qa_system_prompt = (
"You are an assistant for question-answering tasks. Use "
"the following pieces of retrieved context to answer the "
"question. If you don't know the answer, just say that you "
"don't know. Use three sentences maximum and keep the answer "
"concise."
"\n\n"
"{context}"
)
qa_prompt = ChatPromptTemplate.from_messages(
[
("system", qa_system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}"),
]
)
# Below we use create_stuff_documents_chain to feed all retrieved context
# into the LLM. Note that we can also use StuffDocumentsChain and other
# instances of BaseCombineDocumentsChain.
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
rag_chain = create_retrieval_chain(
history_aware_retriever, question_answer_chain
)
# Usage:
chat_history = [] # Collect chat history here (a sequence of messages)
rag_chain.invoke({"input": query, "chat_history": chat_history})
This chain takes in chat history (a list of messages) and new questions,
and then returns an answer to that question.
The algorithm for this chain consists of three parts:
1. Use the chat history and the new question to create a "standalone question".
This is done so that this question can be passed into the retrieval step to fetch
relevant documents. If only the new question was passed in, then relevant context
may be lacking. If the whole conversation was passed into retrieval, there may
be unnecessary information there that would distract from retrieval.
2. This new question is passed to the retriever and relevant documents are
returned.
3. The retrieved documents are passed to an LLM along with either the new question
(default behavior) or the original question and chat history to generate a final
response.
Example:
.. code-block:: python
from langchain.chains import (
StuffDocumentsChain, LLMChain, ConversationalRetrievalChain
)
from langchain_core.prompts import PromptTemplate
from langchain_community.llms import OpenAI
combine_docs_chain = StuffDocumentsChain(...)
vectorstore = ...
retriever = vectorstore.as_retriever()
# This controls how the standalone question is generated.
# Should take `chat_history` and `question` as input variables.
template = (
"Combine the chat history and follow up question into "
"a standalone question. Chat History: {chat_history}"
"Follow up question: {question}"
)
prompt = PromptTemplate.from_template(template)
llm = OpenAI()
question_generator_chain = LLMChain(llm=llm, prompt=prompt)
chain = ConversationalRetrievalChain(
combine_docs_chain=combine_docs_chain,
retriever=retriever,
question_generator=question_generator_chain,
)
"""
retriever: BaseRetriever
"""Retriever to use to fetch documents."""
max_tokens_limit: Optional[int] = None
"""If set, enforces that the documents returned are less than this limit.
This is only enforced if `combine_docs_chain` is of type StuffDocumentsChain."""
def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
num_docs = len(docs)
if self.max_tokens_limit and isinstance(
self.combine_docs_chain, StuffDocumentsChain
):
tokens = [
self.combine_docs_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self,
question: str,
inputs: Dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
docs = self.retriever.invoke(
question, config={"callbacks": run_manager.get_child()}
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self,
question: str,
inputs: Dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
docs = await self.retriever.ainvoke(
question, config={"callbacks": run_manager.get_child()}
)
return self._reduce_tokens_below_limit(docs)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
retriever: BaseRetriever,
condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
chain_type: str = "stuff",
verbose: bool = False,
condense_question_llm: Optional[BaseLanguageModel] = None,
combine_docs_chain_kwargs: Optional[Dict] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseConversationalRetrievalChain:
"""Convenience method to load chain from LLM and retriever.
This provides some logic to create the `question_generator` chain
as well as the combine_docs_chain.
Args:
llm: The default language model to use at every part of this chain
                (e.g. in both the question generation and the answering)
retriever: The retriever to use to fetch relevant documents from.
condense_question_prompt: The prompt to use to condense the chat history
and new question into a standalone question.
chain_type: The chain type to use to create the combine_docs_chain, will
be sent to `load_qa_chain`.
verbose: Verbosity flag for logging to stdout.
condense_question_llm: The language model to use for condensing the chat
history and new question into a standalone question. If none is
provided, will default to `llm`.
combine_docs_chain_kwargs: Parameters to pass as kwargs to `load_qa_chain`
when constructing the combine_docs_chain.
callbacks: Callbacks to pass to all subchains.
kwargs: Additional parameters to pass when initializing
ConversationalRetrievalChain
"""
combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
doc_chain = load_qa_chain(
llm,
chain_type=chain_type,
verbose=verbose,
callbacks=callbacks,
**combine_docs_chain_kwargs,
)
_llm = condense_question_llm or llm
condense_question_chain = LLMChain(
llm=_llm,
prompt=condense_question_prompt,
verbose=verbose,
callbacks=callbacks,
)
return cls(
retriever=retriever,
combine_docs_chain=doc_chain,
question_generator=condense_question_chain,
callbacks=callbacks,
**kwargs,
)
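# A minimal sketch of `from_llm`, which wires up both the question-condensing
# chain and the combine-docs chain in one call. Assumes `langchain_openai` is
# installed and an OpenAI API key is configured; the store, embeddings and
# sample text are placeholders.
if __name__ == "__main__":
    from langchain_core.vectorstores import InMemoryVectorStore
    from langchain_openai import ChatOpenAI, OpenAIEmbeddings  # assumed available

    store = InMemoryVectorStore.from_texts(
        ["The chain condenses the follow-up question before retrieval."],
        OpenAIEmbeddings(),
    )
    chain = ConversationalRetrievalChain.from_llm(
        ChatOpenAI(), retriever=store.as_retriever()
    )
    result = chain.invoke({"question": "What gets condensed?", "chat_history": []})
    print(result["answer"])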
| |
155400
|
class ChatVectorDBChain(BaseConversationalRetrievalChain):
"""Chain for chatting with a vector database."""
vectorstore: VectorStore = Field(alias="vectorstore")
top_k_docs_for_context: int = 4
search_kwargs: dict = Field(default_factory=dict)
@property
def _chain_type(self) -> str:
return "chat-vector-db"
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: Dict) -> Any:
warnings.warn(
"`ChatVectorDBChain` is deprecated - "
"please use `from langchain.chains import ConversationalRetrievalChain`"
)
return values
def _get_docs(
self,
question: str,
inputs: Dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
vectordbkwargs = inputs.get("vectordbkwargs", {})
full_kwargs = {**self.search_kwargs, **vectordbkwargs}
return self.vectorstore.similarity_search(
question, k=self.top_k_docs_for_context, **full_kwargs
)
async def _aget_docs(
self,
question: str,
inputs: Dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
raise NotImplementedError("ChatVectorDBChain does not support async")
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
vectorstore: VectorStore,
condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
chain_type: str = "stuff",
combine_docs_chain_kwargs: Optional[Dict] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseConversationalRetrievalChain:
"""Load chain from LLM."""
combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
doc_chain = load_qa_chain(
llm,
chain_type=chain_type,
callbacks=callbacks,
**combine_docs_chain_kwargs,
)
condense_question_chain = LLMChain(
llm=llm, prompt=condense_question_prompt, callbacks=callbacks
)
return cls(
vectorstore=vectorstore,
combine_docs_chain=doc_chain,
question_generator=condense_question_chain,
callbacks=callbacks,
**kwargs,
)
| |
155413
|
"""Chain that carries on a conversation and calls an LLM."""
from typing import List
from langchain_core._api import deprecated
from langchain_core.memory import BaseMemory
from langchain_core.prompts import BasePromptTemplate
from pydantic import ConfigDict, Field, model_validator
from typing_extensions import Self
from langchain.chains.conversation.prompt import PROMPT
from langchain.chains.llm import LLMChain
from langchain.memory.buffer import ConversationBufferMemory
@deprecated(
since="0.2.7",
alternative=(
"RunnableWithMessageHistory: "
"https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html" # noqa: E501
),
removal="1.0",
)
class ConversationChain(LLMChain):
"""Chain to have a conversation and load context from memory.
This class is deprecated in favor of ``RunnableWithMessageHistory``. Please refer
to this tutorial for more detail: https://python.langchain.com/docs/tutorials/chatbot/
``RunnableWithMessageHistory`` offers several benefits, including:
- Stream, batch, and async support;
- More flexible memory handling, including the ability to manage memory
outside the chain;
- Support for multiple threads.
Below is a minimal implementation, analogous to using ``ConversationChain`` with
the default ``ConversationBufferMemory``:
.. code-block:: python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
store = {} # memory is maintained outside the chain
def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
if session_id not in store:
store[session_id] = InMemoryChatMessageHistory()
return store[session_id]
llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
chain = RunnableWithMessageHistory(llm, get_session_history)
chain.invoke(
"Hi I'm Bob.",
config={"configurable": {"session_id": "1"}},
) # session_id determines thread
Memory objects can also be incorporated into the ``get_session_history`` callable:
.. code-block:: python
from langchain.memory import ConversationBufferWindowMemory
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
store = {} # memory is maintained outside the chain
def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
if session_id not in store:
store[session_id] = InMemoryChatMessageHistory()
return store[session_id]
memory = ConversationBufferWindowMemory(
chat_memory=store[session_id],
k=3,
return_messages=True,
)
assert len(memory.memory_variables) == 1
key = memory.memory_variables[0]
messages = memory.load_memory_variables({})[key]
store[session_id] = InMemoryChatMessageHistory(messages=messages)
return store[session_id]
llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
chain = RunnableWithMessageHistory(llm, get_session_history)
chain.invoke(
"Hi I'm Bob.",
config={"configurable": {"session_id": "1"}},
) # session_id determines thread
Example:
.. code-block:: python
from langchain.chains import ConversationChain
from langchain_community.llms import OpenAI
conversation = ConversationChain(llm=OpenAI())
"""
memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
"""Default memory store."""
prompt: BasePromptTemplate = PROMPT
"""Default conversation prompt to use."""
input_key: str = "input" #: :meta private:
output_key: str = "response" #: :meta private:
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@property
def input_keys(self) -> List[str]:
"""Use this since so some prompt vars come from history."""
return [self.input_key]
@model_validator(mode="after")
def validate_prompt_input_variables(self) -> Self:
"""Validate that prompt input variables are consistent."""
memory_keys = self.memory.memory_variables
input_key = self.input_key
if input_key in memory_keys:
raise ValueError(
f"The input key {input_key} was also found in the memory keys "
f"({memory_keys}) - please provide keys that don't overlap."
)
prompt_variables = self.prompt.input_variables
expected_keys = memory_keys + [input_key]
if set(expected_keys) != set(prompt_variables):
raise ValueError(
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but got {memory_keys} as inputs from "
f"memory, and {input_key} as the normal input key."
)
return self
| |
155422
|
from importlib import metadata
from langchain_core._api import warn_deprecated
## Create namespaces for pydantic v1 and v2.
# This code must stay at the top of the file before other modules may
# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules.
#
# This hack is done for the following reasons:
# * Langchain will attempt to remain compatible with both pydantic v1 and v2 since
#   both dependencies and dependents may be stuck on either v1 or v2.
# * Creating namespaces for pydantic v1 and v2 should allow us to write code that
# unambiguously uses either v1 or v2 API.
# * This change is easier to roll out and roll back.
try:
from pydantic.v1 import * # noqa: F403
except ImportError:
from pydantic import * # type: ignore # noqa: F403
try:
_PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
except metadata.PackageNotFoundError:
_PYDANTIC_MAJOR_VERSION = 0
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
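# In practice the migration described in the message above is just swapping the
# shim import for a direct Pydantic import, e.g.:
#
#     # before
#     from langchain.pydantic_v1 import BaseModel
#     # after, on a codebase that has moved to the pydantic v2 API
#     from pydantic import BaseModel
#     # or, while still on the v1 API
#     from pydantic.v1 import BaseModel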
| |
155423
|
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.dataclasses import * # noqa: F403
except ImportError:
from pydantic.dataclasses import * # type: ignore # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
| |
155424
|
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.main import * # noqa: F403
except ImportError:
from pydantic.main import * # type: ignore # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
| |
155427
|
from typing import Any, Dict, List, Optional, Type
from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.vectorstores import VectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
from pydantic import BaseModel, ConfigDict, Field
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain.chains.retrieval_qa.base import RetrievalQA
def _get_default_text_splitter() -> TextSplitter:
return RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
class VectorStoreIndexWrapper(BaseModel):
"""Wrapper around a vectorstore for easy access."""
vectorstore: VectorStore
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
def query(
self,
question: str,
llm: Optional[BaseLanguageModel] = None,
retriever_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> str:
"""Query the vectorstore."""
if llm is None:
raise NotImplementedError(
"This API has been changed to require an LLM. "
"Please provide an llm to use for querying the vectorstore.\n"
"For example,\n"
"from langchain_openai import OpenAI\n"
"llm = OpenAI(temperature=0)"
)
retriever_kwargs = retriever_kwargs or {}
chain = RetrievalQA.from_chain_type(
llm, retriever=self.vectorstore.as_retriever(**retriever_kwargs), **kwargs
)
return chain.invoke({chain.input_key: question})[chain.output_key]
async def aquery(
self,
question: str,
llm: Optional[BaseLanguageModel] = None,
retriever_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> str:
"""Query the vectorstore."""
if llm is None:
raise NotImplementedError(
"This API has been changed to require an LLM. "
"Please provide an llm to use for querying the vectorstore.\n"
"For example,\n"
"from langchain_openai import OpenAI\n"
"llm = OpenAI(temperature=0)"
)
retriever_kwargs = retriever_kwargs or {}
chain = RetrievalQA.from_chain_type(
llm, retriever=self.vectorstore.as_retriever(**retriever_kwargs), **kwargs
)
return (await chain.ainvoke({chain.input_key: question}))[chain.output_key]
def query_with_sources(
self,
question: str,
llm: Optional[BaseLanguageModel] = None,
retriever_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> dict:
"""Query the vectorstore and get back sources."""
if llm is None:
raise NotImplementedError(
"This API has been changed to require an LLM. "
"Please provide an llm to use for querying the vectorstore.\n"
"For example,\n"
"from langchain_openai import OpenAI\n"
"llm = OpenAI(temperature=0)"
)
retriever_kwargs = retriever_kwargs or {}
chain = RetrievalQAWithSourcesChain.from_chain_type(
llm, retriever=self.vectorstore.as_retriever(**retriever_kwargs), **kwargs
)
return chain.invoke({chain.question_key: question})
async def aquery_with_sources(
self,
question: str,
llm: Optional[BaseLanguageModel] = None,
retriever_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> dict:
"""Query the vectorstore and get back sources."""
if llm is None:
raise NotImplementedError(
"This API has been changed to require an LLM. "
"Please provide an llm to use for querying the vectorstore.\n"
"For example,\n"
"from langchain_openai import OpenAI\n"
"llm = OpenAI(temperature=0)"
)
retriever_kwargs = retriever_kwargs or {}
chain = RetrievalQAWithSourcesChain.from_chain_type(
llm, retriever=self.vectorstore.as_retriever(**retriever_kwargs), **kwargs
)
return await chain.ainvoke({chain.question_key: question})
def _get_in_memory_vectorstore() -> Type[VectorStore]:
"""Get the InMemoryVectorStore."""
import warnings
try:
from langchain_community.vectorstores.inmemory import InMemoryVectorStore
except ImportError:
raise ImportError(
"Please install langchain-community to use the InMemoryVectorStore."
)
warnings.warn(
"Using InMemoryVectorStore as the default vectorstore."
"This memory store won't persist data. You should explicitly"
"specify a vectorstore when using VectorstoreIndexCreator"
)
return InMemoryVectorStore
class VectorstoreIndexCreator(BaseModel):
"""Logic for creating indexes."""
vectorstore_cls: Type[VectorStore] = Field(
default_factory=_get_in_memory_vectorstore
)
embedding: Embeddings
text_splitter: TextSplitter = Field(default_factory=_get_default_text_splitter)
vectorstore_kwargs: dict = Field(default_factory=dict)
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
def from_loaders(self, loaders: List[BaseLoader]) -> VectorStoreIndexWrapper:
"""Create a vectorstore index from loaders."""
docs = []
for loader in loaders:
docs.extend(loader.load())
return self.from_documents(docs)
async def afrom_loaders(self, loaders: List[BaseLoader]) -> VectorStoreIndexWrapper:
"""Create a vectorstore index from loaders."""
docs = []
for loader in loaders:
async for doc in loader.alazy_load():
docs.append(doc)
return await self.afrom_documents(docs)
def from_documents(self, documents: List[Document]) -> VectorStoreIndexWrapper:
"""Create a vectorstore index from documents."""
sub_docs = self.text_splitter.split_documents(documents)
vectorstore = self.vectorstore_cls.from_documents(
sub_docs, self.embedding, **self.vectorstore_kwargs
)
return VectorStoreIndexWrapper(vectorstore=vectorstore)
async def afrom_documents(
self, documents: List[Document]
) -> VectorStoreIndexWrapper:
"""Create a vectorstore index from documents."""
sub_docs = self.text_splitter.split_documents(documents)
vectorstore = await self.vectorstore_cls.afrom_documents(
sub_docs, self.embedding, **self.vectorstore_kwargs
)
return VectorStoreIndexWrapper(vectorstore=vectorstore)
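# A minimal sketch of building an index from in-memory documents and querying
# it. Assumes `langchain_openai` is installed and an OpenAI API key is
# configured; the embeddings, LLM and sample document are placeholders.
if __name__ == "__main__":
    from langchain_core.documents import Document
    from langchain_core.vectorstores import InMemoryVectorStore
    from langchain_openai import ChatOpenAI, OpenAIEmbeddings  # assumed available

    creator = VectorstoreIndexCreator(
        vectorstore_cls=InMemoryVectorStore, embedding=OpenAIEmbeddings()
    )
    index = creator.from_documents(
        [Document(page_content="An index wraps a vector store plus a QA chain.")]
    )
    print(index.query("What does an index wrap?", llm=ChatOpenAI()))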
| |
155445
|
import warnings
from langchain_core.globals import get_debug as core_get_debug
from langchain_core.globals import get_verbose as core_get_verbose
from langchain_core.globals import set_debug as core_set_debug
from langchain_core.globals import set_verbose as core_set_verbose
from langchain.globals import get_debug, get_verbose, set_debug, set_verbose
def test_no_warning() -> None:
with warnings.catch_warnings():
warnings.simplefilter("error")
get_debug()
set_debug(False)
get_verbose()
set_verbose(False)
core_get_debug()
core_set_debug(False)
core_get_verbose()
core_set_verbose(False)
def test_debug_is_settable_directly() -> None:
from langchain_core.callbacks.manager import _get_debug
import langchain
previous_value = langchain.debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
langchain.debug = not previous_value
new_value = langchain.debug
new_fn_reading = _get_debug()
try:
# We successfully changed the value of `debug`.
assert new_value != previous_value
# If we access `debug` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `debug` via `get_debug()` we also get the same value.
assert new_value == get_debug()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `debug` to the value it had before.
set_debug(previous_value)
def test_debug_is_settable_via_setter() -> None:
from langchain_core.callbacks.manager import _get_debug
from langchain import globals
previous_value = globals._debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
set_debug(not previous_value)
new_value = globals._debug
new_fn_reading = _get_debug()
try:
# We successfully changed the value of `debug`.
assert new_value != previous_value
# If we access `debug` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `debug` via `get_debug()` we also get the same value.
assert new_value == get_debug()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `debug` to the value it had before.
set_debug(previous_value)
def test_verbose_is_settable_directly() -> None:
import langchain
from langchain.chains.base import _get_verbosity
previous_value = langchain.verbose
previous_fn_reading = _get_verbosity()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
langchain.verbose = not previous_value
new_value = langchain.verbose
new_fn_reading = _get_verbosity()
try:
# We successfully changed the value of `verbose`.
assert new_value != previous_value
# If we access `verbose` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `verbose` via `get_verbose()` we also get the same value.
assert new_value == get_verbose()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `verbose` to the value it had before.
set_verbose(previous_value)
def test_verbose_is_settable_via_setter() -> None:
from langchain import globals
from langchain.chains.base import _get_verbosity
previous_value = globals._verbose
previous_fn_reading = _get_verbosity()
assert previous_value == previous_fn_reading
# Flip the value of the flag.
set_verbose(not previous_value)
new_value = globals._verbose
new_fn_reading = _get_verbosity()
try:
# We successfully changed the value of `verbose`.
assert new_value != previous_value
# If we access `verbose` via a function used elsewhere in langchain,
# it also sees the same new value.
assert new_value == new_fn_reading
# If we access `verbose` via `get_verbose()` we also get the same value.
assert new_value == get_verbose()
finally:
# Make sure we don't alter global state, even if the test fails.
# Always reset `verbose` to the value it had before.
set_verbose(previous_value)
| |
155482
|
from langchain import chat_models
EXPECTED_ALL = [
"init_chat_model",
"ChatOpenAI",
"BedrockChat",
"AzureChatOpenAI",
"FakeListChatModel",
"PromptLayerChatOpenAI",
"ChatEverlyAI",
"ChatAnthropic",
"ChatCohere",
"ChatDatabricks",
"ChatGooglePalm",
"ChatMlflow",
"ChatMLflowAIGateway",
"ChatOllama",
"ChatVertexAI",
"JinaChat",
"HumanInputChatModel",
"MiniMaxChat",
"ChatAnyscale",
"ChatLiteLLM",
"ErnieBotChat",
"ChatJavelinAIGateway",
"ChatKonko",
"PaiEasChatEndpoint",
"QianfanChatEndpoint",
"ChatFireworks",
"ChatYandexGPT",
"ChatBaichuan",
"ChatHunyuan",
"GigaChat",
"VolcEngineMaasChat",
]
def test_all_imports() -> None:
assert set(chat_models.__all__) == set(EXPECTED_ALL)
| |
155488
|
def test_agent_stream() -> None:
"""Test react chain with callbacks by setting verbose globally."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
f"FooBarBaz\nAction: {tool}\nAction Input: something else",
"Oh well\nFinal Answer: curses foiled again",
]
    # Fake LLM that returns the scripted responses in order.
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: f"Results for: {x}",
description="Useful for searching",
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
output = [a for a in agent.stream("when was langchain made")]
assert output == [
{
"actions": [
AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
)
],
"messages": [
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: misalignment"
)
],
},
{
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
observation="Results for: misalignment",
)
],
"messages": [HumanMessage(content="Results for: misalignment")],
},
{
"actions": [
AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
)
],
"messages": [
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: something else"
)
],
},
{
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
observation="Results for: something else",
)
],
"messages": [HumanMessage(content="Results for: something else")],
},
{
"output": "curses foiled again",
"messages": [
AIMessage(content="Oh well\nFinal Answer: curses foiled again")
],
},
]
assert add(output) == {
"actions": [
AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
],
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
observation="Results for: misalignment",
),
AgentStep(
action=AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
observation="Results for: something else",
),
],
"messages": [
AIMessage(content="FooBarBaz\nAction: Search\nAction Input: misalignment"),
HumanMessage(content="Results for: misalignment"),
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: something else"
),
HumanMessage(content="Results for: something else"),
AIMessage(content="Oh well\nFinal Answer: curses foiled again"),
],
"output": "curses foiled again",
}
def test_agent_tool_return_direct() -> None:
"""Test agent using tools that return directly."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
output = agent.run("when was langchain made")
assert output == "misalignment"
def test_agent_tool_return_direct_in_intermediate_steps() -> None:
"""Test agent using tools that return directly."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
return_intermediate_steps=True,
)
resp = agent("when was langchain made")
assert isinstance(resp, dict)
assert resp["output"] == "misalignment"
assert len(resp["intermediate_steps"]) == 1
    action, _action_input = resp["intermediate_steps"][0]
assert action.tool == "Search"
def test_agent_with_new_prefix_suffix() -> None:
"""Test agent initialization kwargs with new prefix and suffix."""
fake_llm = FakeListLLM(
responses=["FooBarBaz\nAction: Search\nAction Input: misalignment"]
)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
prefix = "FooBarBaz"
suffix = "Begin now!\nInput: {input}\nThought: {agent_scratchpad}"
agent = initialize_agent(
tools=tools,
llm=fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
agent_kwargs={"prefix": prefix, "suffix": suffix},
)
# avoids "BasePromptTemplate" has no attribute "template" error
assert hasattr(agent.agent.llm_chain.prompt, "template") # type: ignore
prompt_str = agent.agent.llm_chain.prompt.template # type: ignore
assert prompt_str.startswith(prefix), "Prompt does not start with prefix"
assert prompt_str.endswith(suffix), "Prompt does not end with suffix"
def test_agent_lookup_tool() -> None:
"""Test agent lookup tool."""
fake_llm = FakeListLLM(
responses=["FooBarBaz\nAction: Search\nAction Input: misalignment"]
)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools=tools,
llm=fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
assert agent.lookup_tool("Search") == tools[0]
def test_agent_invalid_tool() -> None:
"""Test agent invalid tool and correct suggestions."""
fake_llm = FakeListLLM(responses=["FooBarBaz\nAction: Foo\nAction Input: Bar"])
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools=tools,
llm=fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
return_intermediate_steps=True,
max_iterations=1,
)
resp = agent("when was langchain made")
resp["intermediate_steps"][0][1] == "Foo is not a valid tool, try one of [Search]."
| |
155494
|
async def test_agent_stream() -> None:
"""Test react chain with callbacks by setting verbose globally."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
f"FooBarBaz\nAction: {tool}\nAction Input: something else",
"Oh well\nFinal Answer: curses foiled again",
]
    # Fake LLM that returns the scripted responses in order.
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: f"Results for: {x}",
description="Useful for searching",
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
output = [a async for a in agent.astream("when was langchain made")]
assert output == [
{
"actions": [
AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
)
],
"messages": [
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: misalignment"
)
],
},
{
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
observation="Results for: misalignment",
)
],
"messages": [HumanMessage(content="Results for: misalignment")],
},
{
"actions": [
AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
)
],
"messages": [
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: something else"
)
],
},
{
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
observation="Results for: something else",
)
],
"messages": [HumanMessage(content="Results for: something else")],
},
{
"output": "curses foiled again",
"messages": [
AIMessage(content="Oh well\nFinal Answer: curses foiled again")
],
},
]
assert add(output) == {
"actions": [
AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
],
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
observation="Results for: misalignment",
),
AgentStep(
action=AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
observation="Results for: something else",
),
],
"messages": [
AIMessage(content="FooBarBaz\nAction: Search\nAction Input: misalignment"),
HumanMessage(content="Results for: misalignment"),
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: something else"
),
HumanMessage(content="Results for: something else"),
AIMessage(content="Oh well\nFinal Answer: curses foiled again"),
],
"output": "curses foiled again",
}
async def test_agent_tool_return_direct() -> None:
"""Test agent using tools that return directly."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
output = await agent.arun("when was langchain made")
assert output == "misalignment"
async def test_agent_tool_return_direct_in_intermediate_steps() -> None:
"""Test agent using tools that return directly."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
return_intermediate_steps=True,
)
resp = await agent.acall("when was langchain made")
assert isinstance(resp, dict)
assert resp["output"] == "misalignment"
assert len(resp["intermediate_steps"]) == 1
    action, _action_input = resp["intermediate_steps"][0]
assert action.tool == "Search"
async def test_agent_invalid_tool() -> None:
"""Test agent invalid tool and correct suggestions."""
fake_llm = FakeListLLM(responses=["FooBarBaz\nAction: Foo\nAction Input: Bar"])
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools=tools,
llm=fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
return_intermediate_steps=True,
max_iterations=1,
)
resp = await agent.acall("when was langchain made")
resp["intermediate_steps"][0][1] == "Foo is not a valid tool, try one of [Search]."
| |
155510
|
from langchain_core.agents import AgentAction, AgentFinish
from langchain.agents.output_parsers.json import JSONAgentOutputParser
def test_tool_usage() -> None:
parser = JSONAgentOutputParser()
_input = """ ```
{
"action": "search",
"action_input": "2+2"
}
```"""
output = parser.invoke(_input)
expected_output = AgentAction(tool="search", tool_input="2+2", log=_input)
assert output == expected_output
def test_finish() -> None:
parser = JSONAgentOutputParser()
_input = """```
{
"action": "Final Answer",
"action_input": "4"
}
```"""
output = parser.invoke(_input)
expected_output = AgentFinish(return_values={"output": "4"}, log=_input)
assert output == expected_output
| |
155536
|
from langchain.schema.output_parser import __all__
EXPECTED_ALL = [
"BaseCumulativeTransformOutputParser",
"BaseGenerationOutputParser",
"BaseLLMOutputParser",
"BaseOutputParser",
"BaseTransformOutputParser",
"NoOpOutputParser",
"OutputParserException",
"StrOutputParser",
"T",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
| |
155660
|
"""Test conversation chain and memory."""
from langchain_core.documents import Document
from langchain_core.language_models import FakeListLLM
from langchain.chains.conversational_retrieval.base import (
ConversationalRetrievalChain,
)
from langchain.memory.buffer import ConversationBufferMemory
from tests.unit_tests.retrievers.sequential_retriever import SequentialRetriever
async def test_simplea() -> None:
fixed_resp = "I don't know"
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = SequentialRetriever(sequential_responses=[[]])
memory = ConversationBufferMemory( # type: ignore[call-arg]
k=1, output_key="answer", memory_key="chat_history", return_messages=True
)
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
memory=memory,
retriever=retriever,
return_source_documents=True,
rephrase_question=False,
response_if_no_docs_found=fixed_resp,
verbose=True,
)
got = await qa_chain.acall("What is the answer?")
assert got["chat_history"][1].content == fixed_resp
assert got["answer"] == fixed_resp
async def test_fixed_message_response_when_docs_founda() -> None:
fixed_resp = "I don't know"
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = SequentialRetriever(
sequential_responses=[[Document(page_content=answer)]]
)
memory = ConversationBufferMemory( # type: ignore[call-arg]
k=1, output_key="answer", memory_key="chat_history", return_messages=True
)
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
memory=memory,
retriever=retriever,
return_source_documents=True,
rephrase_question=False,
response_if_no_docs_found=fixed_resp,
verbose=True,
)
got = await qa_chain.acall("What is the answer?")
assert got["chat_history"][1].content == answer
assert got["answer"] == answer
def test_fixed_message_response_when_no_docs_found() -> None:
fixed_resp = "I don't know"
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = SequentialRetriever(sequential_responses=[[]])
memory = ConversationBufferMemory( # type: ignore[call-arg]
k=1, output_key="answer", memory_key="chat_history", return_messages=True
)
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
memory=memory,
retriever=retriever,
return_source_documents=True,
rephrase_question=False,
response_if_no_docs_found=fixed_resp,
verbose=True,
)
got = qa_chain("What is the answer?")
assert got["chat_history"][1].content == fixed_resp
assert got["answer"] == fixed_resp
def test_fixed_message_response_when_docs_found() -> None:
fixed_resp = "I don't know"
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = SequentialRetriever(
sequential_responses=[[Document(page_content=answer)]]
)
memory = ConversationBufferMemory( # type: ignore[call-arg]
k=1, output_key="answer", memory_key="chat_history", return_messages=True
)
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
memory=memory,
retriever=retriever,
return_source_documents=True,
rephrase_question=False,
response_if_no_docs_found=fixed_resp,
verbose=True,
)
got = qa_chain("What is the answer?")
assert got["chat_history"][1].content == answer
assert got["answer"] == answer
| |
155731
|
#!/bin/bash
set -eu
# Initialize a variable to keep track of errors
errors=0
# Check the conditions
git grep '^from langchain import' langchain | grep -vE 'from langchain import (__version__|hub)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/pydantic_v1 | grep -vE 'from langchain.(pydantic_v1|_api)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/load | grep -vE 'from langchain.(pydantic_v1|load|_api)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/utils | grep -vE 'from langchain.(pydantic_v1|utils|_api)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/schema | grep -vE 'from langchain.(pydantic_v1|utils|schema|load|env|_api)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/adapters | grep -vE 'from langchain.(pydantic_v1|utils|schema|load|_api)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/callbacks | grep -vE 'from langchain.(pydantic_v1|utils|schema|load|callbacks|env|_api)' && errors=$((errors+1))
# TODO: it's probably not great that so many other modules depend on `langchain_community.utilities`, because there can be a lot of imports there
git grep '^from langchain\.' langchain/utilities | grep -vE 'from langchain.(pydantic_v1|utils|schema|load|callbacks|env|utilities|_api)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/storage | grep -vE 'from langchain.(pydantic_v1|utils|schema|load|callbacks|env|storage|utilities|_api)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/prompts | grep -vE 'from langchain.(pydantic_v1|utils|schema|load|callbacks|env|prompts|_api)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/output_parsers | grep -vE 'from langchain.(pydantic_v1|utils|schema|load|callbacks|env|prompts|_api|output_parsers|_api)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/llms | grep -vE 'from langchain.(pydantic_v1|utils|schema|load|callbacks|env|prompts|llms|utilities|globals|_api)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/chat_models | grep -vE 'from langchain.(pydantic_v1|utils|schema|load|callbacks|env|llms|prompts|adapters|chat_models|utilities|globals|_api)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/embeddings | grep -vE 'from langchain.(pydantic_v1|utils|schema|load|callbacks|env|storage|llms|embeddings|utilities|_api)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/docstore | grep -vE 'from langchain.(pydantic_v1|utils|schema|docstore|_api)' && errors=$((errors+1))
git grep '^from langchain\.' langchain/vectorstores | grep -vE 'from langchain.(pydantic_v1|utils|schema|load|callbacks|env|_api|storage|llms|docstore|vectorstores|utilities|_api)' && errors=$((errors+1))
# make sure not importing from langchain_experimental
git --no-pager grep '^from langchain_experimental\.' . && errors=$((errors+1))
# Add a basic lint rule to prevent imports from the global namespaces of langchain_community
# This lint rule won't catch imports from local scope.
# We can't add that rule without a more complex script to ignore imports from inside
# a if TYPE_CHECKING block.
git grep '^from langchain_community' | grep -vE '# ignore: community-import' && errors=$((errors+1))
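# For reference (illustrative example, not an exhaustive description of the rule):
#   from langchain_community.chat_models import FakeListChatModel
# would be flagged by the check above, while appending the marker
#   from langchain_community.chat_models import FakeListChatModel  # ignore: community-import
# opts that single line out of the lint rule.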
# Decide on an exit status based on the errors
if [ "$errors" -gt 0 ]; then
exit 1
else
exit 0
fi
| |
155906
|
# langchain-chroma
This package contains the LangChain integration with Chroma.
## Installation
```bash
pip install -U langchain-chroma
```
## Usage
The `Chroma` class exposes the connection to the Chroma vector store.
```python
from langchain_chroma import Chroma
embeddings = ... # use a LangChain Embeddings class
vectorstore = Chroma(embedding_function=embeddings)
```
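For a fuller picture, here is a minimal sketch of adding documents and querying the store. It is illustrative rather than canonical: `OpenAIEmbeddings` (from the separate `langchain_openai` package), the collection name, and the sample documents are all assumptions; any LangChain `Embeddings` implementation can be substituted.
```python
from langchain_chroma import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings  # assumed embedding provider

# Ephemeral, in-memory collection; pass persist_directory=... to persist it.
vectorstore = Chroma(
    collection_name="demo",  # hypothetical collection name
    embedding_function=OpenAIEmbeddings(),
)

docs = [
    Document(page_content="foo", metadata={"topic": "a"}),
    Document(page_content="bar", metadata={"topic": "b"}),
]
vectorstore.add_documents(documents=docs, ids=["1", "2"])

# Retrieve the single closest match to the query text.
results = vectorstore.similarity_search("foo", k=1)
print(results[0].page_content)
```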
| |
155982
|
def test_chroma_update_document() -> None:
"""Test the update_document function in the Chroma class."""
# Make a consistent embedding
embedding = ConsistentFakeEmbeddings()
# Initial document content and id
initial_content = "foo"
document_id = "doc1"
# Create an instance of Document with initial content and metadata
original_doc = Document(page_content=initial_content, metadata={"page": "0"})
# Initialize a Chroma instance with the original document
docsearch = Chroma.from_documents(
collection_name="test_collection",
documents=[original_doc],
embedding=embedding,
ids=[document_id],
)
old_embedding = docsearch._collection.peek()["embeddings"][ # type: ignore
docsearch._collection.peek()["ids"].index(document_id)
]
# Define updated content for the document
updated_content = "updated foo"
# Create a new Document instance with the updated content and the same id
updated_doc = Document(page_content=updated_content, metadata={"page": "0"})
# Update the document in the Chroma instance
docsearch.update_document(document_id=document_id, document=updated_doc)
# Perform a similarity search with the updated content
output = docsearch.similarity_search(updated_content, k=1)
# Assert that the new embedding is correct
new_embedding = docsearch._collection.peek()["embeddings"][ # type: ignore
docsearch._collection.peek()["ids"].index(document_id)
]
docsearch.delete_collection()
# Assert that the updated document is returned by the search
assert output == [Document(page_content=updated_content, metadata={"page": "0"})]
assert new_embedding == embedding.embed_documents([updated_content])[0]
assert new_embedding != old_embedding
# TODO: RELEVANCE SCORE IS BROKEN. FIX TEST
def test_chroma_with_relevance_score_custom_normalization_fn() -> None:
"""Test searching with relevance score and custom normalization function."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Chroma.from_texts(
collection_name="test1_collection",
texts=texts,
embedding=FakeEmbeddings(),
metadatas=metadatas,
relevance_score_fn=lambda d: d * 0,
collection_metadata={"hnsw:space": "l2"},
)
output = docsearch.similarity_search_with_relevance_scores("foo", k=3)
docsearch.delete_collection()
assert output == [
(Document(page_content="foo", metadata={"page": "0"}), 0.0),
(Document(page_content="bar", metadata={"page": "1"}), 0.0),
(Document(page_content="baz", metadata={"page": "2"}), 0.0),
]
def test_init_from_client(client: chromadb.ClientAPI) -> None:
Chroma(client=client)
def test_init_from_client_settings() -> None:
import chromadb
client_settings = chromadb.config.Settings()
Chroma(client_settings=client_settings)
def test_chroma_add_documents_no_metadata() -> None:
db = Chroma(embedding_function=FakeEmbeddings())
db.add_documents([Document(page_content="foo")])
db.delete_collection()
def test_chroma_add_documents_mixed_metadata() -> None:
db = Chroma(embedding_function=FakeEmbeddings())
docs = [
Document(page_content="foo"),
Document(page_content="bar", metadata={"baz": 1}),
]
ids = ["0", "1"]
actual_ids = db.add_documents(docs, ids=ids)
search = db.similarity_search("foo bar")
db.delete_collection()
assert actual_ids == ids
assert sorted(search, key=lambda d: d.page_content) == sorted(
docs, key=lambda d: d.page_content
)
def is_api_accessible(url: str) -> bool:
try:
response = requests.get(url)
return response.status_code == 200
except Exception:
return False
def batch_support_chroma_version() -> bool:
    # Batching support was added in chromadb 0.4.10.
    major, minor, patch = (int(p) for p in chromadb.__version__.split(".")[:3])
    return (major, minor, patch) >= (0, 4, 10)
@pytest.mark.requires("chromadb")
@pytest.mark.skipif(
not is_api_accessible("http://localhost:8000/api/v1/heartbeat"),
reason="API not accessible",
)
@pytest.mark.skipif(
not batch_support_chroma_version(),
reason="ChromaDB version does not support batching",
)
def test_chroma_large_batch() -> None:
client = chromadb.HttpClient()
embedding_function = Fak(size=255)
col = client.get_or_create_collection(
"my_collection",
embedding_function=embedding_function.embed_documents, # type: ignore
)
docs = ["This is a test document"] * (client.max_batch_size + 100) # type: ignore
db = Chroma.from_texts(
client=client,
collection_name=col.name,
texts=docs,
embedding=embedding_function,
ids=[str(uuid.uuid4()) for _ in range(len(docs))],
)
db.delete_collection()
@pytest.mark.requires("chromadb")
@pytest.mark.skipif(
not is_api_accessible("http://localhost:8000/api/v1/heartbeat"),
reason="API not accessible",
)
@pytest.mark.skipif(
not batch_support_chroma_version(),
reason="ChromaDB version does not support batching",
)
def test_chroma_large_batch_update() -> None:
client = chromadb.HttpClient()
embedding_function = Fak(size=255)
col = client.get_or_create_collection(
"my_collection",
embedding_function=embedding_function.embed_documents, # type: ignore
)
docs = ["This is a test document"] * (client.max_batch_size + 100) # type: ignore
ids = [str(uuid.uuid4()) for _ in range(len(docs))]
db = Chroma.from_texts(
client=client,
collection_name=col.name,
texts=docs,
embedding=embedding_function,
ids=ids,
)
new_docs = [
Document(
page_content="This is a new test document", metadata={"doc_id": f"{i}"}
)
for i in range(len(docs) - 10)
]
new_ids = [_id for _id in ids[: len(new_docs)]]
db.update_documents(ids=new_ids, documents=new_docs)
db.delete_collection()
@pytest.mark.requires("chromadb")
@pytest.mark.skipif(
not is_api_accessible("http://localhost:8000/api/v1/heartbeat"),
reason="API not accessible",
)
@pytest.mark.skipif(
batch_support_chroma_version(), reason="ChromaDB version does not support batching"
)
def test_chroma_legacy_batching() -> None:
client = chromadb.HttpClient()
embedding_function = Fak(size=255)
col = client.get_or_create_collection(
"my_collection",
embedding_function=embedding_function.embed_documents, # type: ignore
)
docs = ["This is a test document"] * 100
db = Chroma.from_texts(
client=client,
collection_name=col.name,
texts=docs,
embedding=embedding_function,
ids=[str(uuid.uuid4()) for _ in range(len(docs))],
)
db.delete_collection()
def test_create_collection_if_not_exist_default() -> None:
"""Tests existing behaviour without the new create_collection_if_not_exists flag."""
texts = ["foo", "bar", "baz"]
docsearch = Chroma.from_texts(
collection_name="test_collection", texts=texts, embedding=FakeEmbeddings()
)
assert docsearch._client.get_collection("test_collection") is not None
docsearch.delete_collection()
def test_create_collection_if_not_exist_true_existing(
client: chromadb.ClientAPI,
) -> None:
"""Tests create_collection_if_not_exists=True and collection already existing."""
client.create_collection("test_collection")
vectorstore = Chroma(
client=client,
collection_name="test_collection",
embedding_function=FakeEmbeddings(),
create_collection_if_not_exists=True,
)
assert vectorstore._client.get_collection("test_collection") is not None
vectorstore.delete_collection()
def test_create_collection_if_not_exist_false_existing(
client: chromadb.ClientAPI,
) -> None:
"""Tests create_collection_if_not_exists=False and collection already existing."""
client.create_collection("test_collection")
vectorstore = Chroma(
client=client,
collection_name="test_collection",
embedding_function=FakeEmbeddings(),
create_collection_if_not_exists=False,
)
assert vectorstore._client.get_collection("test_collection") is not None
vectorstore.delete_collection()
| |
155985
|
class Chroma(VectorStore):
"""Chroma vector store integration.
Setup:
Install ``chromadb``, ``langchain-chroma`` packages:
.. code-block:: bash
pip install -qU chromadb langchain-chroma
Key init args — indexing params:
collection_name: str
Name of the collection.
embedding_function: Embeddings
Embedding function to use.
Key init args — client params:
client: Optional[Client]
Chroma client to use.
client_settings: Optional[chromadb.config.Settings]
Chroma client settings.
persist_directory: Optional[str]
Directory to persist the collection.
Instantiate:
.. code-block:: python
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings
vector_store = Chroma(
collection_name="foo",
embedding_function=OpenAIEmbeddings(),
# other params...
)
Add Documents:
.. code-block:: python
from langchain_core.documents import Document
document_1 = Document(page_content="foo", metadata={"baz": "bar"})
document_2 = Document(page_content="thud", metadata={"bar": "baz"})
document_3 = Document(page_content="i will be deleted :(")
documents = [document_1, document_2, document_3]
ids = ["1", "2", "3"]
vector_store.add_documents(documents=documents, ids=ids)
Update Documents:
.. code-block:: python
updated_document = Document(
page_content="qux",
metadata={"bar": "baz"}
)
vector_store.update_documents(ids=["1"],documents=[updated_document])
Delete Documents:
.. code-block:: python
vector_store.delete(ids=["3"])
Search:
.. code-block:: python
results = vector_store.similarity_search(query="thud",k=1)
for doc in results:
print(f"* {doc.page_content} [{doc.metadata}]")
.. code-block:: python
* thud [{'baz': 'bar'}]
Search with filter:
.. code-block:: python
results = vector_store.similarity_search(query="thud",k=1,filter={"baz": "bar"})
for doc in results:
print(f"* {doc.page_content} [{doc.metadata}]")
.. code-block:: python
* foo [{'baz': 'bar'}]
Search with score:
.. code-block:: python
results = vector_store.similarity_search_with_score(query="qux",k=1)
for doc, score in results:
print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
.. code-block:: python
* [SIM=0.000000] qux [{'bar': 'baz', 'baz': 'bar'}]
Async:
.. code-block:: python
# add documents
# await vector_store.aadd_documents(documents=documents, ids=ids)
# delete documents
# await vector_store.adelete(ids=["3"])
# search
# results = vector_store.asimilarity_search(query="thud",k=1)
# search with score
results = await vector_store.asimilarity_search_with_score(query="qux",k=1)
for doc,score in results:
print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
.. code-block:: python
* [SIM=0.335463] foo [{'baz': 'bar'}]
Use as Retriever:
.. code-block:: python
retriever = vector_store.as_retriever(
search_type="mmr",
search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
)
retriever.invoke("thud")
.. code-block:: python
[Document(metadata={'baz': 'bar'}, page_content='thud')]
""" # noqa: E501
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
def __init__(
self,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
embedding_function: Optional[Embeddings] = None,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
collection_metadata: Optional[Dict] = None,
client: Optional[chromadb.ClientAPI] = None,
relevance_score_fn: Optional[Callable[[float], float]] = None,
create_collection_if_not_exists: Optional[bool] = True,
) -> None:
"""Initialize with a Chroma client.
Args:
collection_name: Name of the collection to create.
embedding_function: Embedding class object. Used to embed texts.
persist_directory: Directory to persist the collection.
client_settings: Chroma client settings
collection_metadata: Collection configurations.
client: Chroma client. Documentation:
https://docs.trychroma.com/reference/js-client#class:-chromaclient
relevance_score_fn: Function to calculate relevance score from distance.
Used only in `similarity_search_with_relevance_scores`
create_collection_if_not_exists: Whether to create collection
if it doesn't exist. Defaults to True.
"""
if client is not None:
self._client_settings = client_settings
self._client = client
self._persist_directory = persist_directory
else:
if client_settings:
# If client_settings is provided with persist_directory specified,
# then it is "in-memory and persisting to disk" mode.
client_settings.persist_directory = (
persist_directory or client_settings.persist_directory
)
_client_settings = client_settings
elif persist_directory:
_client_settings = chromadb.config.Settings(is_persistent=True)
_client_settings.persist_directory = persist_directory
else:
_client_settings = chromadb.config.Settings()
self._client_settings = _client_settings
self._client = chromadb.Client(_client_settings)
self._persist_directory = (
_client_settings.persist_directory or persist_directory
)
self._embedding_function = embedding_function
self._chroma_collection: Optional[chromadb.Collection] = None
self._collection_name = collection_name
self._collection_metadata = collection_metadata
if create_collection_if_not_exists:
self.__ensure_collection()
else:
self._chroma_collection = self._client.get_collection(name=collection_name)
self.override_relevance_score_fn = relevance_score_fn
def __ensure_collection(self) -> None:
"""Ensure that the collection exists or create it."""
self._chroma_collection = self._client.get_or_create_collection(
name=self._collection_name,
embedding_function=None,
metadata=self._collection_metadata,
)
@property
def _collection(self) -> chromadb.Collection:
"""Returns the underlying Chroma collection or throws an exception."""
if self._chroma_collection is None:
raise ValueError(
"Chroma collection not initialized. "
"Use `reset_collection` to re-create and initialize the collection. "
)
return self._chroma_collection
@property
def embeddings(self) -> Optional[Embeddings]:
"""Access the query embedding object."""
return self._embedding_function
@xor_args(("query_texts", "query_embeddings"))
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> Union[List[Document], chromadb.QueryResult]:
"""Query the chroma collection.
Args:
query_texts: List of query texts.
query_embeddings: List of query embeddings.
n_results: Number of results to return. Defaults to 4.
where: dict used to filter results by
e.g. {"color" : "red", "price": 4.20}.
where_document: dict used to filter by the documents.
E.g. {$contains: {"text": "hello"}}.
kwargs: Additional keyword arguments to pass to Chroma collection query.
Returns:
List of `n_results` nearest neighbor embeddings for provided
query_embeddings or query_texts.
See more: https://docs.trychroma.com/reference/py-collection#query
"""
return self._collection.query(
query_texts=query_texts,
query_embeddings=query_embeddings, # type: ignore
n_results=n_results,
where=where, # type: ignore
where_document=where_document, # type: ignore
**kwargs,
)
def encode_image(self, uri: str) -> str:
"""Get base64 string from image URI."""
with open(uri, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
| |
155986
|
    def add_images(
self,
uris: List[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more images through the embeddings and add to the vectorstore.
Args:
uris: File path to the image.
metadatas: Optional list of metadatas.
When querying, you can filter on this metadata.
ids: Optional list of IDs.
kwargs: Additional keyword arguments to pass.
Returns:
List of IDs of the added images.
Raises:
ValueError: When metadata is incorrect.
"""
# Map from uris to b64 encoded strings
b64_texts = [self.encode_image(uri=uri) for uri in uris]
# Populate IDs
if ids is None:
ids = [str(uuid.uuid4()) for _ in uris]
embeddings = None
# Set embeddings
if self._embedding_function is not None and hasattr(
self._embedding_function, "embed_image"
):
embeddings = self._embedding_function.embed_image(uris=uris)
if metadatas:
# fill metadatas with empty dicts if somebody
# did not specify metadata for all images
length_diff = len(uris) - len(metadatas)
if length_diff:
metadatas = metadatas + [{}] * length_diff
empty_ids = []
non_empty_ids = []
for idx, m in enumerate(metadatas):
if m:
non_empty_ids.append(idx)
else:
empty_ids.append(idx)
if non_empty_ids:
metadatas = [metadatas[idx] for idx in non_empty_ids]
images_with_metadatas = [b64_texts[idx] for idx in non_empty_ids]
embeddings_with_metadatas = (
[embeddings[idx] for idx in non_empty_ids] if embeddings else None
)
ids_with_metadata = [ids[idx] for idx in non_empty_ids]
try:
self._collection.upsert(
metadatas=metadatas, # type: ignore
embeddings=embeddings_with_metadatas, # type: ignore
documents=images_with_metadatas,
ids=ids_with_metadata,
)
except ValueError as e:
if "Expected metadata value to be" in str(e):
msg = (
"Try filtering complex metadata using "
"langchain_community.vectorstores.utils.filter_complex_metadata."
)
raise ValueError(e.args[0] + "\n\n" + msg)
else:
raise e
if empty_ids:
images_without_metadatas = [b64_texts[j] for j in empty_ids]
embeddings_without_metadatas = (
[embeddings[j] for j in empty_ids] if embeddings else None
)
ids_without_metadatas = [ids[j] for j in empty_ids]
self._collection.upsert(
embeddings=embeddings_without_metadatas,
documents=images_without_metadatas,
ids=ids_without_metadatas,
)
else:
self._collection.upsert(
embeddings=embeddings,
documents=b64_texts,
ids=ids,
)
return ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Texts to add to the vectorstore.
metadatas: Optional list of metadatas.
When querying, you can filter on this metadata.
ids: Optional list of IDs.
kwargs: Additional keyword arguments.
Returns:
List of IDs of the added texts.
Raises:
ValueError: When metadata is incorrect.
"""
        # Materialize the input first so that generating ids below cannot
        # exhaust a one-shot iterable before the texts are embedded.
        texts = list(texts)
        if ids is None:
            ids = [str(uuid.uuid4()) for _ in texts]
        embeddings = None
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(texts)
if metadatas:
# fill metadatas with empty dicts if somebody
# did not specify metadata for all texts
length_diff = len(texts) - len(metadatas)
if length_diff:
metadatas = metadatas + [{}] * length_diff
empty_ids = []
non_empty_ids = []
for idx, m in enumerate(metadatas):
if m:
non_empty_ids.append(idx)
else:
empty_ids.append(idx)
if non_empty_ids:
metadatas = [metadatas[idx] for idx in non_empty_ids]
texts_with_metadatas = [texts[idx] for idx in non_empty_ids]
embeddings_with_metadatas = (
[embeddings[idx] for idx in non_empty_ids] if embeddings else None
)
ids_with_metadata = [ids[idx] for idx in non_empty_ids]
try:
self._collection.upsert(
metadatas=metadatas, # type: ignore
embeddings=embeddings_with_metadatas, # type: ignore
documents=texts_with_metadatas,
ids=ids_with_metadata,
)
except ValueError as e:
if "Expected metadata value to be" in str(e):
msg = (
"Try filtering complex metadata from the document using "
"langchain_community.vectorstores.utils.filter_complex_metadata."
)
raise ValueError(e.args[0] + "\n\n" + msg)
else:
raise e
if empty_ids:
texts_without_metadatas = [texts[j] for j in empty_ids]
embeddings_without_metadatas = (
[embeddings[j] for j in empty_ids] if embeddings else None
)
ids_without_metadatas = [ids[j] for j in empty_ids]
self._collection.upsert(
embeddings=embeddings_without_metadatas, # type: ignore
documents=texts_without_metadatas,
ids=ids_without_metadatas,
)
else:
self._collection.upsert(
embeddings=embeddings, # type: ignore
documents=texts,
ids=ids,
)
return ids
def similarity_search(
self,
query: str,
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with Chroma.
Args:
query: Query text to search for.
k: Number of results to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
kwargs: Additional keyword arguments to pass to Chroma collection query.
Returns:
List of documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(
query, k, filter=filter, **kwargs
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
where_document: dict used to filter by the documents.
E.g. {$contains: {"text": "hello"}}.
kwargs: Additional keyword arguments to pass to Chroma collection query.
Returns:
List of Documents most similar to the query vector.
"""
results = self.__query_collection(
query_embeddings=embedding,
n_results=k,
where=filter,
where_document=where_document,
**kwargs,
)
return _results_to_docs(results)
| |
155987
|
    def similarity_search_by_vector_with_relevance_scores(
self,
embedding: List[float],
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector and similarity score.
Args:
embedding (List[float]): Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
where_document: dict used to filter by the documents.
E.g. {$contains: {"text": "hello"}}.
kwargs: Additional keyword arguments to pass to Chroma collection query.
Returns:
List of documents most similar to the query text and relevance score
in float for each. Lower score represents more similarity.
"""
results = self.__query_collection(
query_embeddings=embedding,
n_results=k,
where=filter,
where_document=where_document,
**kwargs,
)
return _results_to_docs_and_scores(results)
def similarity_search_with_score(
self,
query: str,
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Run similarity search with Chroma with distance.
Args:
query: Query text to search for.
k: Number of results to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
where_document: dict used to filter by the documents.
E.g. {$contains: {"text": "hello"}}.
kwargs: Additional keyword arguments to pass to Chroma collection query.
Returns:
List of documents most similar to the query text and
distance in float for each. Lower score represents more similarity.
"""
if self._embedding_function is None:
results = self.__query_collection(
query_texts=[query],
n_results=k,
where=filter,
where_document=where_document,
**kwargs,
)
else:
query_embedding = self._embedding_function.embed_query(query)
results = self.__query_collection(
query_embeddings=[query_embedding],
n_results=k,
where=filter,
where_document=where_document,
**kwargs,
)
return _results_to_docs_and_scores(results)
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""Select the relevance score function based on collections distance metric.
The most similar documents will have the lowest relevance score. Default
relevance score function is euclidean distance. Distance metric must be
        provided in `collection_metadata` during initialization of the Chroma object.
Example: collection_metadata={"hnsw:space": "cosine"}. Available distance
metrics are: 'cosine', 'l2' and 'ip'.
Returns:
The relevance score function.
Raises:
ValueError: If the distance metric is not supported.
"""
if self.override_relevance_score_fn:
return self.override_relevance_score_fn
distance = "l2"
distance_key = "hnsw:space"
metadata = self._collection.metadata
if metadata and distance_key in metadata:
distance = metadata[distance_key]
if distance == "cosine":
return self._cosine_relevance_score_fn
elif distance == "l2":
return self._euclidean_relevance_score_fn
elif distance == "ip":
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
"No supported normalization function"
f" for distance metric of type: {distance}."
"Consider providing relevance_score_fn to Chroma constructor."
)
def similarity_search_by_image(
self,
uri: str,
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Search for similar images based on the given image URI.
Args:
uri (str): URI of the image to search for.
k (int, optional): Number of results to return. Defaults to DEFAULT_K.
filter (Optional[Dict[str, str]], optional): Filter by metadata.
**kwargs (Any): Additional arguments to pass to function.
Returns:
List of Images most similar to the provided image.
Each element in list is a Langchain Document Object.
The page content is b64 encoded image, metadata is default or
as defined by user.
Raises:
ValueError: If the embedding function does not support image embeddings.
"""
if self._embedding_function is None or not hasattr(
self._embedding_function, "embed_image"
):
raise ValueError("The embedding function must support image embedding.")
# Obtain image embedding
# Assuming embed_image returns a single embedding
image_embedding = self._embedding_function.embed_image(uris=[uri])
# Perform similarity search based on the obtained embedding
results = self.similarity_search_by_vector(
embedding=image_embedding,
k=k,
filter=filter,
**kwargs,
)
return results
def similarity_search_by_image_with_relevance_score(
self,
uri: str,
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Search for similar images based on the given image URI.
Args:
uri (str): URI of the image to search for.
k (int, optional): Number of results to return.
Defaults to DEFAULT_K.
filter (Optional[Dict[str, str]], optional): Filter by metadata.
**kwargs (Any): Additional arguments to pass to function.
Returns:
List[Tuple[Document, float]]: List of tuples containing documents similar
to the query image and their similarity scores.
0th element in each tuple is a Langchain Document Object.
The page content is b64 encoded img, metadata is default or defined by user.
Raises:
ValueError: If the embedding function does not support image embeddings.
"""
if self._embedding_function is None or not hasattr(
self._embedding_function, "embed_image"
):
raise ValueError("The embedding function must support image embedding.")
# Obtain image embedding
# Assuming embed_image returns a single embedding
image_embedding = self._embedding_function.embed_image(uris=[uri])
# Perform similarity search based on the obtained embedding
results = self.similarity_search_by_vector_with_relevance_scores(
embedding=image_embedding,
k=k,
filter=filter,
**kwargs,
)
return results
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_K,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to
20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
where_document: dict used to filter by the documents.
E.g. {$contains: {"text": "hello"}}.
kwargs: Additional keyword arguments to pass to Chroma collection query.
Returns:
List of Documents selected by maximal marginal relevance.
"""
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
where_document=where_document,
include=["metadatas", "documents", "distances", "embeddings"],
**kwargs,
)
mmr_selected = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
results["embeddings"][0],
k=k,
lambda_mult=lambda_mult,
)
candidates = _results_to_docs(results)
selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected]
return selected_results
| |
155988
|
    def max_marginal_relevance_search(
self,
query: str,
k: int = DEFAULT_K,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
where_document: dict used to filter by the documents.
E.g. {$contains: {"text": "hello"}}.
kwargs: Additional keyword arguments to pass to Chroma collection query.
Returns:
List of Documents selected by maximal marginal relevance.
Raises:
ValueError: If the embedding function is not provided.
"""
if self._embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on" "creation."
)
embedding = self._embedding_function.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding,
k,
fetch_k,
lambda_mult=lambda_mult,
filter=filter,
where_document=where_document,
)
def delete_collection(self) -> None:
"""Delete the collection."""
self._client.delete_collection(self._collection.name)
self._chroma_collection = None
def reset_collection(self) -> None:
"""Resets the collection.
Resets the collection by deleting the collection and recreating an empty one.
"""
self.delete_collection()
self.__ensure_collection()
def get(
self,
ids: Optional[OneOrMany[ID]] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Optional[List[str]] = None,
) -> Dict[str, Any]:
"""Gets the collection.
Args:
ids: The ids of the embeddings to get. Optional.
where: A Where type dict used to filter results by.
E.g. `{"$and": [{"color": "red"}, {"price": 4.20}]}` Optional.
limit: The number of documents to return. Optional.
offset: The offset to start returning results from.
Useful for paging results with limit. Optional.
where_document: A WhereDocument type dict used to filter by the documents.
E.g. `{$contains: "hello"}`. Optional.
include: A list of what to include in the results.
Can contain `"embeddings"`, `"metadatas"`, `"documents"`.
Ids are always included.
Defaults to `["metadatas", "documents"]`. Optional.
Return:
A dict with the keys `"ids"`, `"embeddings"`, `"metadatas"`, `"documents"`.
"""
kwargs = {
"ids": ids,
"where": where,
"limit": limit,
"offset": offset,
"where_document": where_document,
}
if include is not None:
kwargs["include"] = include
return self._collection.get(**kwargs) # type: ignore
def update_document(self, document_id: str, document: Document) -> None:
"""Update a document in the collection.
Args:
document_id: ID of the document to update.
document: Document to update.
"""
return self.update_documents([document_id], [document])
# type: ignore
def update_documents(self, ids: List[str], documents: List[Document]) -> None:
"""Update a document in the collection.
Args:
ids: List of ids of the document to update.
documents: List of documents to update.
Raises:
ValueError: If the embedding function is not provided.
"""
text = [document.page_content for document in documents]
metadata = [document.metadata for document in documents]
if self._embedding_function is None:
raise ValueError(
"For update, you must specify an embedding function on creation."
)
embeddings = self._embedding_function.embed_documents(text)
if hasattr(
self._collection._client, "max_batch_size"
): # for Chroma 0.4.10 and above
from chromadb.utils.batch_utils import create_batches
for batch in create_batches(
api=self._collection._client,
ids=ids,
metadatas=metadata, # type: ignore
documents=text,
embeddings=embeddings, # type: ignore
):
self._collection.update(
ids=batch[0],
embeddings=batch[1],
documents=batch[3],
metadatas=batch[2],
)
else:
self._collection.update(
ids=ids,
embeddings=embeddings, # type: ignore
documents=text,
metadatas=metadata, # type: ignore
)
@classmethod
def from_texts(
cls: Type[Chroma],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.ClientAPI] = None,
collection_metadata: Optional[Dict] = None,
**kwargs: Any,
) -> Chroma:
"""Create a Chroma vectorstore from a raw documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
texts: List of texts to add to the collection.
collection_name: Name of the collection to create.
persist_directory: Directory to persist the collection.
embedding: Embedding function. Defaults to None.
metadatas: List of metadatas. Defaults to None.
ids: List of document IDs. Defaults to None.
client_settings: Chroma client settings.
client: Chroma client. Documentation:
https://docs.trychroma.com/reference/js-client#class:-chromaclient
collection_metadata: Collection configurations.
Defaults to None.
kwargs: Additional keyword arguments to initialize a Chroma client.
Returns:
Chroma: Chroma vectorstore.
"""
chroma_collection = cls(
collection_name=collection_name,
embedding_function=embedding,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
collection_metadata=collection_metadata,
**kwargs,
)
if ids is None:
ids = [str(uuid.uuid4()) for _ in texts]
if hasattr(
chroma_collection._client, "max_batch_size"
): # for Chroma 0.4.10 and above
from chromadb.utils.batch_utils import create_batches
for batch in create_batches(
api=chroma_collection._client,
ids=ids,
metadatas=metadatas, # type: ignore
documents=texts,
):
chroma_collection.add_texts(
texts=batch[3] if batch[3] else [],
metadatas=batch[2] if batch[2] else None, # type: ignore
ids=batch[0],
)
else:
chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return chroma_collection
| |
155989
|
    @classmethod
def from_documents(
cls: Type[Chroma],
documents: List[Document],
embedding: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
        client: Optional[chromadb.ClientAPI] = None,
collection_metadata: Optional[Dict] = None,
**kwargs: Any,
) -> Chroma:
"""Create a Chroma vectorstore from a list of documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
collection_name: Name of the collection to create.
persist_directory: Directory to persist the collection.
            ids: List of document IDs. Defaults to None.
documents: List of documents to add to the vectorstore.
embedding: Embedding function. Defaults to None.
client_settings: Chroma client settings.
client: Chroma client. Documentation:
https://docs.trychroma.com/reference/js-client#class:-chromaclient
collection_metadata: Collection configurations.
Defaults to None.
kwargs: Additional keyword arguments to initialize a Chroma client.
Returns:
Chroma: Chroma vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
collection_metadata=collection_metadata,
**kwargs,
)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
kwargs: Additional keyword arguments.
"""
self._collection.delete(ids=ids, **kwargs)
| |
156151
|
from typing import Optional, Tuple, Union
from langchain.agents import AgentOutputParser
from langchain_core.agents import AgentAction, AgentFinish
def extract_action_details(text: str) -> Tuple[Optional[str], Optional[str]]:
# Split the text into lines and strip whitespace
lines = [line.strip() for line in text.strip().split("\n")]
# Initialize variables to hold the extracted values
action = None
action_input = None
# Iterate through the lines to find and extract the desired information
for line in lines:
if line.startswith("Action:"):
action = line.split(":", 1)[1].strip()
elif line.startswith("Action Input:"):
action_input = line.split(":", 1)[1].strip()
return action, action_input
class FakeOutputParser(AgentOutputParser):
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
print("FakeOutputParser", text)
action, input = extract_action_details(text)
if action:
log = f"\nInvoking: `{action}` with `{input}`"
return AgentAction(tool=action, tool_input=(input or ""), log=log)
elif "Final Answer" in text:
return AgentFinish({"output": text}, text)
return AgentAction(
"Intermediate Answer", "after_colon", "Final Answer: This should end"
)
@property
def _type(self) -> str:
return "self_ask"
| |
156377
|
# langchain-pinecone
This package contains the LangChain integration with Pinecone.
## Installation
```bash
pip install -U langchain-pinecone
```
And you should configure credentials by setting the following environment variables:
- `PINECONE_API_KEY`
- `PINECONE_INDEX_NAME`
## Usage
The `PineconeVectorStore` class exposes the connection to the Pinecone vector store.
```python
from langchain_pinecone import PineconeVectorStore
embeddings = ... # use a LangChain Embeddings class
vectorstore = PineconeVectorStore(embedding=embeddings)
```
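Once the store is constructed, it behaves like any other LangChain vector store. The snippet below is a minimal sketch, assuming `PINECONE_API_KEY` and `PINECONE_INDEX_NAME` are set and the index dimensionality matches the embedding model:
```python
vectorstore.add_texts(["hello pinecone"], metadatas=[{"source": "demo"}])

results = vectorstore.similarity_search("hello", k=1)
for doc in results:
    print(doc.page_content, doc.metadata)
```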
| |
156434
|
from __future__ import annotations
import logging
import os
import uuid
from typing import (
TYPE_CHECKING,
Any,
Callable,
Iterable,
List,
Optional,
Tuple,
TypeVar,
)
import numpy as np
from langchain_core._api.deprecation import deprecated
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.utils.iter import batch_iterate
from langchain_core.vectorstores import VectorStore
from pinecone import Pinecone as PineconeClient # type: ignore
from langchain_pinecone._utilities import DistanceStrategy, maximal_marginal_relevance
if TYPE_CHECKING:
from pinecone import Index
logger = logging.getLogger(__name__)
VST = TypeVar("VST", bound=VectorStore)
class PineconeVectorStore(VectorStore):
"""Pinecone vector store integration.
Setup:
Install ``langchain-pinecone`` and set the environment variable ``PINECONE_API_KEY``.
.. code-block:: bash
pip install -qU langchain-pinecone
export PINECONE_API_KEY="your-pinecone-api-key"
Key init args — indexing params:
embedding: Embeddings
Embedding function to use.
Key init args — client params:
index: Optional[Index]
Index to use.
# TODO: Replace with relevant init params.
Instantiate:
.. code-block:: python
import time
import os
from pinecone import Pinecone, ServerlessSpec
from langchain_pinecone import PineconeVectorStore
from langchain_openai import OpenAIEmbeddings
pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY"))
index_name = "langchain-test-index" # change if desired
existing_indexes = [index_info["name"] for index_info in pc.list_indexes()]
if index_name not in existing_indexes:
pc.create_index(
name=index_name,
dimension=1536,
metric="cosine",
spec=ServerlessSpec(cloud="aws", region="us-east-1"),
)
while not pc.describe_index(index_name).status["ready"]:
time.sleep(1)
index = pc.Index(index_name)
vector_store = PineconeVectorStore(index=index, embedding=OpenAIEmbeddings())
Add Documents:
.. code-block:: python
from langchain_core.documents import Document
document_1 = Document(page_content="foo", metadata={"baz": "bar"})
document_2 = Document(page_content="thud", metadata={"bar": "baz"})
document_3 = Document(page_content="i will be deleted :(")
documents = [document_1, document_2, document_3]
ids = ["1", "2", "3"]
vector_store.add_documents(documents=documents, ids=ids)
Delete Documents:
.. code-block:: python
vector_store.delete(ids=["3"])
Search:
.. code-block:: python
results = vector_store.similarity_search(query="thud",k=1)
for doc in results:
print(f"* {doc.page_content} [{doc.metadata}]")
.. code-block:: python
* thud [{'bar': 'baz'}]
Search with filter:
.. code-block:: python
results = vector_store.similarity_search(query="thud",k=1,filter={"bar": "baz"})
for doc in results:
print(f"* {doc.page_content} [{doc.metadata}]")
.. code-block:: python
* thud [{'bar': 'baz'}]
Search with score:
.. code-block:: python
results = vector_store.similarity_search_with_score(query="qux",k=1)
for doc, score in results:
print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
.. code-block:: python
* [SIM=0.832268] foo [{'baz': 'bar'}]
Async:
.. code-block:: python
# add documents
# await vector_store.aadd_documents(documents=documents, ids=ids)
# delete documents
# await vector_store.adelete(ids=["3"])
# search
# results = vector_store.asimilarity_search(query="thud",k=1)
# search with score
results = await vector_store.asimilarity_search_with_score(query="qux",k=1)
for doc,score in results:
print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
.. code-block:: python
* [SIM=0.832268] foo [{'baz': 'bar'}]
Use as Retriever:
.. code-block:: python
retriever = vector_store.as_retriever(
search_type="mmr",
search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
)
retriever.invoke("thud")
.. code-block:: python
[Document(metadata={'bar': 'baz'}, page_content='thud')]
""" # noqa: E501
def __init__(
self,
# setting default params to bypass having to pass in
# the index and embedding objects - manually throw
# exceptions if they are not passed in or set in environment
# (keeping param for backwards compatibility)
index: Optional[Any] = None,
embedding: Optional[Embeddings] = None,
text_key: Optional[str] = "text",
namespace: Optional[str] = None,
distance_strategy: Optional[DistanceStrategy] = DistanceStrategy.COSINE,
*,
pinecone_api_key: Optional[str] = None,
index_name: Optional[str] = None,
):
if embedding is None:
raise ValueError("Embedding must be provided")
self._embedding = embedding
if text_key is None:
raise ValueError("Text key must be provided")
self._text_key = text_key
self._namespace = namespace
self.distance_strategy = distance_strategy
if index:
# supports old way of initializing externally
self._index = index
else:
# all internal initialization
_pinecone_api_key = (
pinecone_api_key or os.environ.get("PINECONE_API_KEY") or ""
)
if not _pinecone_api_key:
raise ValueError(
"Pinecone API key must be provided in either `pinecone_api_key` "
"or `PINECONE_API_KEY` environment variable"
)
_index_name = index_name or os.environ.get("PINECONE_INDEX_NAME") or ""
if not _index_name:
raise ValueError(
"Pinecone index name must be provided in either `index_name` "
"or `PINECONE_INDEX_NAME` environment variable"
)
# internal initialization: build the client from the resolved API key
client = PineconeClient(api_key=_pinecone_api_key, source_tag="langchain")
self._index = client.Index(_index_name)
@property
def embeddings(self) -> Optional[Embeddings]:
"""Access the query embedding object if available."""
return self._embedding
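# Hedged construction sketch for the __init__ above; `my_embeddings` stands in
# for any LangChain Embeddings implementation.
#
#     # 1) Internal initialization: relies on PINECONE_API_KEY (or
#     #    pinecone_api_key=) and PINECONE_INDEX_NAME (or index_name=).
#     store = PineconeVectorStore(embedding=my_embeddings, index_name="my-index")
#
#     # 2) Legacy path: pass a pre-built Index object directly.
#     pc = PineconeClient(api_key="...")
#     store = PineconeVectorStore(index=pc.Index("my-index"), embedding=my_embeddings)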
| |
156436
|
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Default will search in '' namespace.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embedding.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult, filter, namespace
)
@classmethod
def get_pinecone_index(
cls,
index_name: Optional[str],
pool_threads: int = 4,
*,
pinecone_api_key: Optional[str] = None,
) -> Index:
"""Return a Pinecone Index instance.
Args:
index_name: Name of the index to use.
pool_threads: Number of threads to use for index upsert.
pinecone_api_key: The api_key of Pinecone.
Returns:
Pinecone Index instance."""
_pinecone_api_key = pinecone_api_key or os.environ.get("PINECONE_API_KEY") or ""
client = PineconeClient(
api_key=_pinecone_api_key, pool_threads=pool_threads, source_tag="langchain"
)
indexes = client.list_indexes()
index_names = [i.name for i in indexes.index_list["indexes"]]
if index_name in index_names:
index = client.Index(index_name)
elif len(index_names) == 0:
raise ValueError(
"No active indexes found in your Pinecone project, "
"are you sure you're using the right Pinecone API key and Environment? "
"Please double check your Pinecone dashboard."
)
else:
raise ValueError(
f"Index '{index_name}' not found in your Pinecone project. "
f"Did you mean one of the following indexes: {', '.join(index_names)}"
)
return index
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
batch_size: int = 32,
text_key: str = "text",
namespace: Optional[str] = None,
index_name: Optional[str] = None,
upsert_kwargs: Optional[dict] = None,
pool_threads: int = 4,
embeddings_chunk_size: int = 1000,
async_req: bool = True,
*,
id_prefix: Optional[str] = None,
**kwargs: Any,
) -> PineconeVectorStore:
"""Construct Pinecone wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Pinecone index
This is intended to be a quick way to get started.
The `pool_threads` affects the speed of the upsert operations.
Setup: set the `PINECONE_API_KEY` environment variable to your Pinecone API key.
Example:
.. code-block:: python
from langchain_pinecone import PineconeVectorStore, PineconeEmbeddings
embeddings = PineconeEmbeddings(model="multilingual-e5-large")
index_name = "my-index"
vectorstore = PineconeVectorStore.from_texts(
texts,
index_name=index_name,
embedding=embeddings,
namespace=namespace,
)
"""
pinecone_index = cls.get_pinecone_index(index_name, pool_threads)
pinecone = cls(pinecone_index, embedding, text_key, namespace, **kwargs)
pinecone.add_texts(
texts,
metadatas=metadatas,
ids=ids,
namespace=namespace,
batch_size=batch_size,
embedding_chunk_size=embeddings_chunk_size,
async_req=async_req,
id_prefix=id_prefix,
**(upsert_kwargs or {}),
)
return pinecone
@classmethod
def from_existing_index(
cls,
index_name: str,
embedding: Embeddings,
text_key: str = "text",
namespace: Optional[str] = None,
pool_threads: int = 4,
) -> PineconeVectorStore:
"""Load pinecone vectorstore from index name."""
pinecone_index = cls.get_pinecone_index(index_name, pool_threads)
return cls(pinecone_index, embedding, text_key, namespace)
def delete(
self,
ids: Optional[List[str]] = None,
delete_all: Optional[bool] = None,
namespace: Optional[str] = None,
filter: Optional[dict] = None,
**kwargs: Any,
) -> None:
"""Delete by vector IDs or filter.
Args:
ids: List of ids to delete.
delete_all: Whether delete all vectors in the index.
filter: Dictionary of conditions to filter vectors to delete.
namespace: Namespace to search in. Default will search in '' namespace.
"""
if namespace is None:
namespace = self._namespace
if delete_all:
self._index.delete(delete_all=True, namespace=namespace, **kwargs)
elif ids is not None:
chunk_size = 1000
for i in range(0, len(ids), chunk_size):
chunk = ids[i : i + chunk_size]
self._index.delete(ids=chunk, namespace=namespace, **kwargs)
elif filter is not None:
self._index.delete(filter=filter, namespace=namespace, **kwargs)
else:
raise ValueError("Either ids, delete_all, or filter must be provided.")
return None
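# Hedged sketch of the three deletion modes accepted above (`store` is an
# already constructed PineconeVectorStore; values are illustrative):
#
#     store.delete(ids=["id-1", "id-2"])              # delete specific vectors
#     store.delete(filter={"source": "demo"})         # delete by metadata filter
#     store.delete(delete_all=True, namespace="demo") # wipe a whole namespace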
@deprecated(since="0.0.3", removal="0.3.0", alternative="PineconeVectorStore")
class Pinecone(PineconeVectorStore):
"""Deprecated. Use PineconeVectorStore instead."""
pass
| |
156511
|
@deprecated(since="0.1.2", alternative="QdrantVectorStore", removal="0.5.0")
class Qdrant(VectorStore):
"""`Qdrant` vector store.
Example:
.. code-block:: python
from qdrant_client import QdrantClient
from langchain_qdrant import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
qdrant = Qdrant(client, collection_name, embedding_function)
"""
CONTENT_KEY: str = "page_content"
METADATA_KEY: str = "metadata"
VECTOR_NAME: Optional[str] = None
def __init__(
self,
client: Any,
collection_name: str,
embeddings: Optional[Embeddings] = None,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
distance_strategy: str = "COSINE",
vector_name: Optional[str] = VECTOR_NAME,
async_client: Optional[Any] = None,
embedding_function: Optional[Callable] = None, # deprecated
):
"""Initialize with necessary components."""
if not isinstance(client, QdrantClient):
raise ValueError(
f"client should be an instance of qdrant_client.QdrantClient, "
f"got {type(client)}"
)
if async_client is not None and not isinstance(async_client, AsyncQdrantClient):
raise ValueError(
f"async_client should be an instance of qdrant_client.AsyncQdrantClient"
f"got {type(async_client)}"
)
if embeddings is None and embedding_function is None:
raise ValueError(
"`embeddings` value can't be None. Pass `Embeddings` instance."
)
if embeddings is not None and embedding_function is not None:
raise ValueError(
"Both `embeddings` and `embedding_function` are passed. "
"Use `embeddings` only."
)
self._embeddings = embeddings
self._embeddings_function = embedding_function
self.client: QdrantClient = client
self.async_client: Optional[AsyncQdrantClient] = async_client
self.collection_name = collection_name
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
self.vector_name = vector_name or self.VECTOR_NAME
if embedding_function is not None:
warnings.warn(
"Using `embedding_function` is deprecated. "
"Pass `Embeddings` instance to `embeddings` instead."
)
if not isinstance(embeddings, Embeddings):
warnings.warn(
"`embeddings` should be an instance of `Embeddings`."
"Using `embeddings` as `embedding_function` which is deprecated"
)
self._embeddings_function = embeddings
self._embeddings = None
self.distance_strategy = distance_strategy.upper()
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embeddings
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
batch_size:
How many vectors to upload per request.
Default: 64
Returns:
List of ids from adding the texts into the vectorstore.
"""
added_ids = []
for batch_ids, points in self._generate_rest_batches(
texts, metadatas, ids, batch_size
):
self.client.upsert(
collection_name=self.collection_name, points=points, **kwargs
)
added_ids.extend(batch_ids)
return added_ids
@sync_call_fallback
async def aadd_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
batch_size:
How many vectors to upload per request.
Default: 64
Returns:
List of ids from adding the texts into the vectorstore.
"""
if self.async_client is None or isinstance(
self.async_client._client, AsyncQdrantLocal
):
raise NotImplementedError(
"QdrantLocal cannot interoperate with sync and async clients"
)
added_ids = []
async for batch_ids, points in self._agenerate_rest_batches(
texts, metadatas, ids, batch_size
):
await self.async_client.upsert(
collection_name=self.collection_name, points=points, **kwargs
)
added_ids.extend(batch_ids)
return added_ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[models.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[models.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score(
query,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def asimilarity_search(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
results = await self.asimilarity_search_with_score(query, k, filter, **kwargs)
return list(map(itemgetter(0), results))
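# Hedged search sketch against the methods above (`qdrant` is an already
# constructed Qdrant instance; the filter targets the default "metadata"
# payload key and the values are illustrative):
#
#     from qdrant_client.http import models
#
#     docs = qdrant.similarity_search(
#         "what is a vector store?",
#         k=4,
#         filter=models.Filter(
#             must=[
#                 models.FieldCondition(
#                     key="metadata.source",
#                     match=models.MatchValue(value="docs"),
#                 )
#             ]
#         ),
#         score_threshold=0.7,
#     )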
| |
156512
|
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[models.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[models.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of documents most similar to the query text and distance for each.
"""
return self.similarity_search_with_score_by_vector(
self._embed_query(query),
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
@sync_call_fallback
async def asimilarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[models.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[models.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
AsyncQdrantClient.search().
Returns:
List of documents most similar to the query text and distance for each.
"""
query_embedding = await self._aembed_query(query)
return await self.asimilarity_search_with_score_by_vector(
query_embedding,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[models.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[models.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def asimilarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[models.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[models.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
AsyncQdrantClient.search().
Returns:
List of Documents most similar to the query.
"""
results = await self.asimilarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
| |
156513
|
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[models.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[models.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of documents most similar to the query text and distance for each.
"""
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
query_vector = embedding
if self.vector_name is not None:
query_vector = (self.vector_name, embedding) # type: ignore[assignment]
results = self.client.search(
collection_name=self.collection_name,
query_vector=query_vector,
query_filter=qdrant_filter,
search_params=search_params,
limit=k,
offset=offset,
with_payload=True,
with_vectors=False, # Langchain does not expect vectors to be returned
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return [
(
self._document_from_scored_point(
result,
self.collection_name,
self.content_payload_key,
self.metadata_payload_key,
),
result.score,
)
for result in results
]
@sync_call_fallback
async def asimilarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[models.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[models.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
AsyncQdrantClient.search().
Returns:
List of documents most similar to the query text and distance for each.
"""
if self.async_client is None or isinstance(
self.async_client._client, AsyncQdrantLocal
):
raise NotImplementedError(
"QdrantLocal cannot interoperate with sync and async clients"
)
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
query_vector = embedding
if self.vector_name is not None:
query_vector = (self.vector_name, embedding) # type: ignore[assignment]
results = await self.async_client.search(
collection_name=self.collection_name,
query_vector=query_vector,
query_filter=qdrant_filter,
search_params=search_params,
limit=k,
offset=offset,
with_payload=True,
with_vectors=False, # Langchain does not expect vectors to be returned
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return [
(
self._document_from_scored_point(
result,
self.collection_name,
self.content_payload_key,
self.metadata_payload_key,
),
result.score,
)
for result in results
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[MetadataFilter] = None,
search_params: Optional[models.SearchParams] = None,
score_threshold: Optional[float] = None,
consistency: Optional[models.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should be present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents selected by maximal marginal relevance.
"""
query_embedding = self._embed_query(query)
return self.max_marginal_relevance_search_by_vector(
query_embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
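# Hedged MMR sketch for the method above: fetch 20 candidates, keep the 4 that
# best balance relevance and diversity (lambda_mult=0.5 is the middle ground;
# values closer to 1 favour similarity, closer to 0 favour diversity).
#
#     diverse_docs = qdrant.max_marginal_relevance_search(
#         "what is a vector store?",
#         k=4,
#         fetch_k=20,
#         lambda_mult=0.5,
#     )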
| |
156518
|
@classmethod
def construct_instance(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[int] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[models.HnswConfigDiff] = None,
optimizers_config: Optional[models.OptimizersConfigDiff] = None,
wal_config: Optional[models.WalConfigDiff] = None,
quantization_config: Optional[models.QuantizationConfig] = None,
init_from: Optional[models.InitFrom] = None,
on_disk: Optional[bool] = None,
force_recreate: bool = False,
**kwargs: Any,
) -> Qdrant:
# Just do a single quick embedding to get vector size
partial_embeddings = embedding.embed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client, async_client = cls._generate_clients(
location=location,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
path=path,
**kwargs,
)
collection_exists = client.collection_exists(collection_name)
if collection_exists and force_recreate:
client.delete_collection(collection_name)
collection_exists = False
if collection_exists:
# Get the vector configuration of the existing collection and vector, if it
# was specified. If the old configuration does not match the current one,
# an exception is raised.
collection_info = client.get_collection(collection_name=collection_name)
current_vector_config = collection_info.config.params.vectors
if isinstance(current_vector_config, dict) and vector_name is not None:
if vector_name not in current_vector_config:
raise QdrantException(
f"Existing Qdrant collection {collection_name} does not "
f"contain vector named {vector_name}. Did you mean one of the "
f"existing vectors: {', '.join(current_vector_config.keys())}? "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
current_vector_config = current_vector_config.get(vector_name) # type: ignore[assignment]
elif isinstance(current_vector_config, dict) and vector_name is None:
raise QdrantException(
f"Existing Qdrant collection {collection_name} uses named vectors. "
f"If you want to reuse it, please set `vector_name` to any of the "
f"existing named vectors: "
f"{', '.join(current_vector_config.keys())}."
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
elif (
not isinstance(current_vector_config, dict) and vector_name is not None
):
raise QdrantException(
f"Existing Qdrant collection {collection_name} doesn't use named "
f"vectors. If you want to reuse it, please set `vector_name` to "
f"`None`. If you want to recreate the collection, set "
f"`force_recreate` parameter to `True`."
)
assert isinstance(current_vector_config, models.VectorParams), (
"Expected current_vector_config to be an instance of "
f"models.VectorParams, but got {type(current_vector_config)}"
)
# Check if the vector configuration has the same dimensionality.
if current_vector_config.size != vector_size:
raise QdrantException(
f"Existing Qdrant collection is configured for vectors with "
f"{current_vector_config.size} "
f"dimensions. Selected embeddings are {vector_size}-dimensional. "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
current_distance_func = (
current_vector_config.distance.name.upper() # type: ignore[union-attr]
)
if current_distance_func != distance_func:
raise QdrantException(
f"Existing Qdrant collection is configured for "
f"{current_distance_func} similarity, but requested "
f"{distance_func}. Please set `distance_func` parameter to "
f"`{current_distance_func}` if you want to reuse it. "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
else:
vectors_config = models.VectorParams(
size=vector_size,
distance=models.Distance[distance_func],
on_disk=on_disk,
)
# If vector name was provided, we're going to use the named vectors feature
# with just a single vector.
if vector_name is not None:
vectors_config = { # type: ignore[assignment]
vector_name: vectors_config,
}
client.create_collection(
collection_name=collection_name,
vectors_config=vectors_config,
shard_number=shard_number,
replication_factor=replication_factor,
write_consistency_factor=write_consistency_factor,
on_disk_payload=on_disk_payload,
hnsw_config=hnsw_config,
optimizers_config=optimizers_config,
wal_config=wal_config,
quantization_config=quantization_config,
init_from=init_from,
timeout=timeout, # type: ignore[arg-type]
)
qdrant = cls(
client=client,
collection_name=collection_name,
embeddings=embedding,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
distance_strategy=distance_func,
vector_name=vector_name,
async_client=async_client,
)
return qdrant
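# Hedged sketch: `construct_instance` is the helper exercised when building a
# store from texts. Reusing a collection whose vector size, distance function
# or vector name does not match raises QdrantException unless the collection is
# recreated. `my_embeddings` is an illustrative Embeddings instance.
#
#     qdrant = Qdrant.from_texts(
#         ["foo", "bar"],
#         embedding=my_embeddings,
#         location=":memory:",
#         collection_name="demo",
#         force_recreate=True,  # drop and recreate if the config differs
#     )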
| |
156523
|
class QdrantVectorStore(VectorStore):
"""Qdrant vector store integration.
Setup:
Install ``langchain-qdrant`` package.
.. code-block:: bash
pip install -qU langchain-qdrant
Key init args — indexing params:
collection_name: str
Name of the collection.
embedding: Embeddings
Embedding function to use.
sparse_embedding: SparseEmbeddings
Optional sparse embedding function to use.
Key init args — client params:
client: QdrantClient
Qdrant client to use.
retrieval_mode: RetrievalMode
Retrieval mode to use.
Instantiate:
.. code-block:: python
from langchain_qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from qdrant_client.http.models import Distance, VectorParams
from langchain_openai import OpenAIEmbeddings
client = QdrantClient(":memory:")
client.create_collection(
collection_name="demo_collection",
vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
)
vector_store = QdrantVectorStore(
client=client,
collection_name="demo_collection",
embedding=OpenAIEmbeddings(),
)
Add Documents:
.. code-block:: python
from langchain_core.documents import Document
from uuid import uuid4
document_1 = Document(page_content="foo", metadata={"baz": "bar"})
document_2 = Document(page_content="thud", metadata={"bar": "baz"})
document_3 = Document(page_content="i will be deleted :(")
documents = [document_1, document_2, document_3]
ids = [str(uuid4()) for _ in range(len(documents))]
vector_store.add_documents(documents=documents, ids=ids)
Delete Documents:
.. code-block:: python
vector_store.delete(ids=[ids[-1]])
Search:
.. code-block:: python
results = vector_store.similarity_search(query="thud",k=1)
for doc in results:
print(f"* {doc.page_content} [{doc.metadata}]")
.. code-block:: python
* thud [{'bar': 'baz', '_id': '0d706099-6dd9-412a-9df6-a71043e020de', '_collection_name': 'demo_collection'}]
Search with filter:
.. code-block:: python
from qdrant_client.http import models
results = vector_store.similarity_search(query="thud",k=1,filter=models.Filter(must=[models.FieldCondition(key="metadata.bar", match=models.MatchValue(value="baz"),)]))
for doc in results:
print(f"* {doc.page_content} [{doc.metadata}]")
.. code-block:: python
* thud [{'bar': 'baz', '_id': '0d706099-6dd9-412a-9df6-a71043e020de', '_collection_name': 'demo_collection'}]
Search with score:
.. code-block:: python
results = vector_store.similarity_search_with_score(query="qux",k=1)
for doc, score in results:
print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
.. code-block:: python
* [SIM=0.832268] foo [{'baz': 'bar', '_id': '44ec7094-b061-45ac-8fbf-014b0f18e8aa', '_collection_name': 'demo_collection'}]
Async:
.. code-block:: python
# add documents
# await vector_store.aadd_documents(documents=documents, ids=ids)
# delete documents
# await vector_store.adelete(ids=["3"])
# search
# results = vector_store.asimilarity_search(query="thud",k=1)
# search with score
results = await vector_store.asimilarity_search_with_score(query="qux",k=1)
for doc,score in results:
print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
.. code-block:: python
* [SIM=0.832268] foo [{'baz': 'bar', '_id': '44ec7094-b061-45ac-8fbf-014b0f18e8aa', '_collection_name': 'demo_collection'}]
Use as Retriever:
.. code-block:: python
retriever = vector_store.as_retriever(
search_type="mmr",
search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
)
retriever.invoke("thud")
.. code-block:: python
[Document(metadata={'bar': 'baz', '_id': '0d706099-6dd9-412a-9df6-a71043e020de', '_collection_name': 'demo_collection'}, page_content='thud')]
""" # noqa: E501
CONTENT_KEY: str = "page_content"
METADATA_KEY: str = "metadata"
VECTOR_NAME: str = "" # The default/unnamed vector - https://qdrant.tech/documentation/concepts/collections/#create-a-collection
SPARSE_VECTOR_NAME: str = "langchain-sparse"
def __init__(
self,
client: QdrantClient,
collection_name: str,
embedding: Optional[Embeddings] = None,
retrieval_mode: RetrievalMode = RetrievalMode.DENSE,
vector_name: str = VECTOR_NAME,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
distance: models.Distance = models.Distance.COSINE,
sparse_embedding: Optional[SparseEmbeddings] = None,
sparse_vector_name: str = SPARSE_VECTOR_NAME,
validate_embeddings: bool = True,
validate_collection_config: bool = True,
):
"""Initialize a new instance of `QdrantVectorStore`.
Example:
.. code-block:: python
qdrant = QdrantVectorStore(
client=client,
collection_name="my-collection",
embedding=OpenAIEmbeddings(),
retrieval_mode=RetrievalMode.HYBRID,
sparse_embedding=FastEmbedSparse(),
)
"""
if validate_embeddings:
self._validate_embeddings(retrieval_mode, embedding, sparse_embedding)
if validate_collection_config:
self._validate_collection_config(
client,
collection_name,
retrieval_mode,
vector_name,
sparse_vector_name,
distance,
embedding,
)
self._client = client
self.collection_name = collection_name
self._embeddings = embedding
self.retrieval_mode = retrieval_mode
self.vector_name = vector_name
self.content_payload_key = content_payload_key
self.metadata_payload_key = metadata_payload_key
self.distance = distance
self._sparse_embeddings = sparse_embedding
self.sparse_vector_name = sparse_vector_name
@property
def client(self) -> QdrantClient:
"""Get the Qdrant client instance that is being used.
Returns:
QdrantClient: An instance of `QdrantClient`.
"""
return self._client
@property
def embeddings(self) -> Embeddings:
"""Get the dense embeddings instance that is being used.
Raises:
ValueError: If embeddings are `None`.
Returns:
Embeddings: An instance of `Embeddings`.
"""
if self._embeddings is None:
raise ValueError(
"Embeddings are `None`. Please set using the `embedding` parameter."
)
return self._embeddings
@property
def sparse_embeddings(self) -> SparseEmbeddings:
"""Get the sparse embeddings instance that is being used.
Raises:
ValueError: If sparse embeddings are `None`.
Returns:
SparseEmbeddings: An instance of `SparseEmbeddings`.
"""
if self._sparse_embeddings is None:
raise ValueError(
"Sparse embeddings are `None`. "
"Please set using the `sparse_embedding` parameter."
)
return self._sparse_embeddings
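# Hedged sketch of the two properties above: they raise instead of returning
# None so callers fail fast when a retrieval mode needs an embedding that was
# never configured. Assumes `client` already holds a matching
# "demo_collection" and `my_embeddings` is a dense Embeddings instance.
#
#     dense_store = QdrantVectorStore(
#         client=client,
#         collection_name="demo_collection",
#         embedding=my_embeddings,
#         retrieval_mode=RetrievalMode.DENSE,
#     )
#     dense_store.embeddings         # -> the Embeddings instance
#     dense_store.sparse_embeddings  # -> raises ValueError (not configured)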
| |
156527
|
@classmethod
def _validate_collection_for_dense(
cls: Type[QdrantVectorStore],
client: QdrantClient,
collection_name: str,
vector_name: str,
distance: models.Distance,
dense_embeddings: Union[Embeddings, List[float], None],
) -> None:
collection_info = client.get_collection(collection_name=collection_name)
vector_config = collection_info.config.params.vectors
if isinstance(vector_config, Dict):
# vector_config is a Dict[str, VectorParams]
if vector_name not in vector_config:
raise QdrantVectorStoreError(
f"Existing Qdrant collection {collection_name} does not "
f"contain dense vector named {vector_name}. "
"Did you mean one of the "
f"existing vectors: {', '.join(vector_config.keys())}? " # type: ignore
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
# Get the VectorParams object for the specified vector_name
vector_config = vector_config[vector_name] # type: ignore
else:
# vector_config is an instance of VectorParams
# Case of a collection with single/unnamed vector.
if vector_name != "":
raise QdrantVectorStoreError(
f"Existing Qdrant collection {collection_name} is built "
"with unnamed dense vector. "
f"If you want to reuse it, set `vector_name` to ''(empty string)."
f"If you want to recreate the collection, "
"set `force_recreate` to `True`."
)
assert vector_config is not None, "VectorParams is None"
if isinstance(dense_embeddings, Embeddings):
vector_size = len(dense_embeddings.embed_documents(["dummy_text"])[0])
elif isinstance(dense_embeddings, list):
vector_size = len(dense_embeddings)
else:
raise ValueError("Invalid `embeddings` type.")
if vector_config.size != vector_size:
raise QdrantVectorStoreError(
f"Existing Qdrant collection is configured for dense vectors with "
f"{vector_config.size} dimensions. "
f"Selected embeddings are {vector_size}-dimensional. "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
if vector_config.distance != distance:
raise QdrantVectorStoreError(
f"Existing Qdrant collection is configured for "
f"{vector_config.distance.name} similarity, but requested "
f"{distance.upper()}. Please set `distance` parameter to "
f"`{vector_config.distance.name}` if you want to reuse it. "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
@classmethod
def _validate_collection_for_sparse(
cls: Type[QdrantVectorStore],
client: QdrantClient,
collection_name: str,
sparse_vector_name: str,
) -> None:
collection_info = client.get_collection(collection_name=collection_name)
sparse_vector_config = collection_info.config.params.sparse_vectors
if (
sparse_vector_config is None
or sparse_vector_name not in sparse_vector_config
):
raise QdrantVectorStoreError(
f"Existing Qdrant collection {collection_name} does not "
f"contain sparse vectors named {sparse_vector_config}. "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
@classmethod
def _validate_embeddings(
cls: Type[QdrantVectorStore],
retrieval_mode: RetrievalMode,
embedding: Optional[Embeddings],
sparse_embedding: Optional[SparseEmbeddings],
) -> None:
if retrieval_mode == RetrievalMode.DENSE and embedding is None:
raise ValueError(
"'embedding' cannot be None when retrieval mode is 'dense'"
)
elif retrieval_mode == RetrievalMode.SPARSE and sparse_embedding is None:
raise ValueError(
"'sparse_embedding' cannot be None when retrieval mode is 'sparse'"
)
elif retrieval_mode == RetrievalMode.HYBRID and any(
[embedding is None, sparse_embedding is None]
):
raise ValueError(
"Both 'embedding' and 'sparse_embedding' cannot be None "
"when retrieval mode is 'hybrid'"
)
| |
156554
|
import pytest
from langchain_core.documents import Document
from qdrant_client import models
from langchain_qdrant import QdrantVectorStore, RetrievalMode
from tests.integration_tests.common import (
ConsistentFakeEmbeddings,
ConsistentFakeSparseEmbeddings,
assert_documents_equals,
)
from tests.integration_tests.fixtures import qdrant_locations, retrieval_modes
@pytest.mark.parametrize("location", qdrant_locations())
@pytest.mark.parametrize("vector_name", ["", "my-vector"])
@pytest.mark.parametrize("retrieval_mode", retrieval_modes())
@pytest.mark.parametrize("batch_size", [1, 64])
def test_similarity_search(
location: str,
vector_name: str,
retrieval_mode: RetrievalMode,
batch_size: int,
) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = QdrantVectorStore.from_texts(
texts,
ConsistentFakeEmbeddings(),
location=location,
batch_size=batch_size,
vector_name=vector_name,
retrieval_mode=retrieval_mode,
sparse_embedding=ConsistentFakeSparseEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
assert_documents_equals(actual=output, expected=[Document(page_content="foo")])
@pytest.mark.parametrize("location", qdrant_locations())
@pytest.mark.parametrize("content_payload_key", [QdrantVectorStore.CONTENT_KEY, "foo"])
@pytest.mark.parametrize(
"metadata_payload_key", [QdrantVectorStore.METADATA_KEY, "bar"]
)
@pytest.mark.parametrize("vector_name", ["", "my-vector"])
@pytest.mark.parametrize("batch_size", [1, 64])
def test_similarity_search_by_vector(
location: str,
content_payload_key: str,
metadata_payload_key: str,
vector_name: str,
batch_size: int,
) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = QdrantVectorStore.from_texts(
texts,
ConsistentFakeEmbeddings(),
location=location,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
batch_size=batch_size,
vector_name=vector_name,
)
embeddings = ConsistentFakeEmbeddings().embed_query("foo")
output = docsearch.similarity_search_by_vector(embeddings, k=1)
assert_documents_equals(output, [Document(page_content="foo")])
@pytest.mark.parametrize("location", qdrant_locations())
@pytest.mark.parametrize(
"metadata_payload_key", [QdrantVectorStore.METADATA_KEY, "bar"]
)
@pytest.mark.parametrize("retrieval_mode", retrieval_modes())
def test_similarity_search_filters(
location: str,
metadata_payload_key: str,
retrieval_mode: RetrievalMode,
) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [
{"page": i, "metadata": {"page": i + 1, "pages": [i + 2, -1]}}
for i in range(len(texts))
]
docsearch = QdrantVectorStore.from_texts(
texts,
ConsistentFakeEmbeddings(),
metadatas=metadatas,
location=location,
metadata_payload_key=metadata_payload_key,
retrieval_mode=retrieval_mode,
sparse_embedding=ConsistentFakeSparseEmbeddings(),
)
qdrant_filter = models.Filter(
must=[
models.FieldCondition(
key=f"{metadata_payload_key}.page", match=models.MatchValue(value=1)
)
]
)
output = docsearch.similarity_search("foo", k=1, filter=qdrant_filter)
assert_documents_equals(
actual=output,
expected=[
Document(
page_content="bar",
metadata={"page": 1, "metadata": {"page": 2, "pages": [3, -1]}},
)
],
)
@pytest.mark.parametrize("location", qdrant_locations())
@pytest.mark.parametrize("vector_name", ["", "my-vector"])
def test_similarity_relevance_search_no_threshold(
location: str,
vector_name: str,
) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [
{"page": i, "metadata": {"page": i + 1, "pages": [i + 2, -1]}}
for i in range(len(texts))
]
docsearch = QdrantVectorStore.from_texts(
texts,
ConsistentFakeEmbeddings(),
metadatas=metadatas,
location=location,
vector_name=vector_name,
)
output = docsearch.similarity_search_with_relevance_scores(
"foo", k=3, score_threshold=None
)
assert len(output) == 3
for i in range(len(output)):
assert round(output[i][1], 2) >= 0
assert round(output[i][1], 2) <= 1
@pytest.mark.parametrize("location", qdrant_locations())
@pytest.mark.parametrize("vector_name", ["", "my-vector"])
def test_relevance_search_with_threshold(
location: str,
vector_name: str,
) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [
{"page": i, "metadata": {"page": i + 1, "pages": [i + 2, -1]}}
for i in range(len(texts))
]
docsearch = QdrantVectorStore.from_texts(
texts,
ConsistentFakeEmbeddings(),
metadatas=metadatas,
location=location,
vector_name=vector_name,
)
score_threshold = 0.99
kwargs = {"score_threshold": score_threshold}
output = docsearch.similarity_search_with_relevance_scores("foo", k=3, **kwargs)
assert len(output) == 1
assert all([score >= score_threshold for _, score in output])
@pytest.mark.parametrize("location", qdrant_locations())
@pytest.mark.parametrize("content_payload_key", [QdrantVectorStore.CONTENT_KEY, "foo"])
@pytest.mark.parametrize(
"metadata_payload_key", [QdrantVectorStore.METADATA_KEY, "bar"]
)
@pytest.mark.parametrize("vector_name", ["", "my-vector"])
def test_relevance_search_with_threshold_and_filter(
location: str,
content_payload_key: str,
metadata_payload_key: str,
vector_name: str,
) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [
{"page": i, "metadata": {"page": i + 1, "pages": [i + 2, -1]}}
for i in range(len(texts))
]
docsearch = QdrantVectorStore.from_texts(
texts,
ConsistentFakeEmbeddings(),
metadatas=metadatas,
location=location,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
vector_name=vector_name,
)
score_threshold = 0.99 # for almost exact match
negative_filter = models.Filter(
must=[
models.FieldCondition(
key=f"{metadata_payload_key}.page", match=models.MatchValue(value=1)
)
]
)
kwargs = {"filter": negative_filter, "score_threshold": score_threshold}
output = docsearch.similarity_search_with_relevance_scores("foo", k=3, **kwargs)
assert len(output) == 0
positive_filter = models.Filter(
must=[
models.FieldCondition(
key=f"{metadata_payload_key}.page", match=models.MatchValue(value=0)
)
]
)
kwargs = {"filter": positive_filter, "score_threshold": score_threshold}
output = docsearch.similarity_search_with_relevance_scores("foo", k=3, **kwargs)
assert len(output) == 1
assert all([score >= score_threshold for _, score in output])
| |
156568
|
{file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8cf142aa6c1a751fcb364158fd710b8a9be874b81889c2bd13aa8893197455e2"},
{file = "aiohttp-3.9.5-cp39-cp39-win32.whl", hash = "sha256:7b179eea70833c8dee51ec42f3b4097bd6370892fa93f510f76762105568cf09"},
{file = "aiohttp-3.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:38d80498e2e169bc61418ff36170e0aad0cd268da8b38a17c4cf29d254a8b3f1"},
{file = "aiohttp-3.9.5.tar.gz", hash = "sha256:edea7d15772ceeb29db4aff55e482d4bcfb6ae160ce144f2682de02f6d693551"},
]
[package.dependencies]
aiosignal = ">=1.1.2"
async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""}
attrs = ">=17.3.0"
frozenlist = ">=1.1.1"
multidict = ">=4.5,<7.0"
yarl = ">=1.0,<2.0"
[package.extras]
speedups = ["Brotli", "aiodns", "brotlicffi"]
[[package]]
name = "aiosignal"
version = "1.3.1"
description = "aiosignal: a list of registered asynchronous callbacks"
optional = false
python-versions = ">=3.7"
files = [
{file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"},
{file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"},
]
[package.dependencies]
frozenlist = ">=1.1.0"
[[package]]
name = "airbyte"
version = "0.7.6"
description = "PyAirbyte"
optional = false
python-versions = ">=3.9,<4.0"
files = [
{file = "airbyte-0.7.6-py3-none-any.whl", hash = "sha256:ed0486533732d600955dfe8f54b7009edbdde1114974679fa83fb5c66d9c91d8"},
{file = "airbyte-0.7.6.tar.gz", hash = "sha256:e4b40f97652ca8b6fd25a87be33bfa72098c0d062b5bc70d25f576e663665dca"},
]
[package.dependencies]
airbyte-cdk = ">=0.58.3,<0.59.0"
duckdb = "0.9.2"
duckdb-engine = "0.9.2"
google-auth = ">=2.27.0,<3.0"
google-cloud-bigquery = ">=3.12.0,<4.0"
jsonschema = ">=3.2.0,<5.0"
orjson = ">=3.9.10,<4.0.0"
overrides = ">=7.4.0,<8.0.0"
pandas = ">=1.5.3,<=2.1.4"
pendulum = "<=3.0.0"
psycopg2-binary = ">=2.9.9,<3.0.0"
pydantic = "<=2.0"
python-dotenv = ">=1.0.1,<2.0.0"
python-ulid = ">=2.2.0,<3.0.0"
requests = ">=2.31.0,<3.0.0"
rich = ">=13.7.0,<14.0.0"
snowflake-connector-python = "3.6.0"
snowflake-sqlalchemy = ">=1.5.1,<2.0.0"
sqlalchemy = "1.4.51"
sqlalchemy-bigquery = {version = "1.9.0", markers = "python_version < \"3.13\""}
types-pyyaml = ">=6.0.12.12,<7.0.0.0"
ulid = ">=1.1,<2.0"
[[package]]
name = "airbyte-cdk"
version = "0.58.9"
description = "A framework for writing Airbyte Connectors."
optional = false
python-versions = ">=3.8"
files = [
{file = "airbyte-cdk-0.58.9.tar.gz", hash = "sha256:e749bd4aab0911bd93c710e3ab2fcdde45d7a0bed2c0032d873006d3df701478"},
{file = "airbyte_cdk-0.58.9-py3-none-any.whl", hash = "sha256:45dfbac2d0ae86dd5872c07c140ce16be8481452b7b8f65b228bc9f892843871"},
]
[package.dependencies]
airbyte-protocol-models = "0.5.1"
backoff = "*"
cachetools = "*"
Deprecated = ">=1.2,<2.0"
dpath = ">=2.0.1,<2.1.0"
genson = "1.2.2"
isodate = ">=0.6.1,<0.7.0"
Jinja2 = ">=3.1.2,<3.2.0"
jsonref = ">=0.2,<1.0"
jsonschema = ">=3.2.0,<3.3.0"
pendulum = "<3.0.0"
pydantic = ">=1.10.8,<2.0.0"
pyrate-limiter = ">=3.1.0,<3.2.0"
python-dateutil = "*"
PyYAML = ">=6.0.1"
requests = "*"
requests-cache = "*"
wcmatch = "8.4"
[package.extras]
dev = ["avro (>=1.11.2,<1.12.0)", "cohere (==4.21)", "fastavro (>=1.8.0,<1.9.0)", "freezegun", "langchain (==0.0.271)", "markdown", "mypy", "openai[embeddings] (==0.27.9)", "pandas (==2.0.3)", "pdf2image (==1.16.3)", "pdfminer.six (==20221105)", "pyarrow (==12.0.1)", "pytesseract (==0.3.10)", "pytest", "pytest-cov", "pytest-httpserver", "pytest-mock", "requests-mock", "tiktoken (==0.4.0)", "unstructured (==0.10.27)", "unstructured.pytesseract (>=0.3.12)", "unstructured[docx,pptx] (==0.10.27)"]
file-based = ["avro (>=1.11.2,<1.12.0)", "fastavro (>=1.8.0,<1.9.0)", "markdown", "pdf2image (==1.16.3)", "pdfminer.six (==20221105)", "pyarrow (==12.0.1)", "pytesseract (==0.3.10)", "unstructured (==0.10.27)", "unstructured.pytesseract (>=0.3.12)", "unstructured[docx,pptx] (==0.10.27)"]
sphinx-docs = ["Sphinx (>=4.2,<5.0)", "sphinx-rtd-theme (>=1.0,<2.0)"]
vector-db-based = ["cohere (==4.21)", "langchain (==0.0.271)", "openai[embeddings] (==0.27.9)", "tiktoken (==0.4.0)"]
| |
156705
|
Anthropic chat models.
See https://docs.anthropic.com/en/docs/models-overview for a list of the latest models.
Setup:
Install ``langchain-anthropic`` and set environment variable ``ANTHROPIC_API_KEY``.
.. code-block:: bash
pip install -U langchain-anthropic
export ANTHROPIC_API_KEY="your-api-key"
Key init args — completion params:
model: str
Name of Anthropic model to use. E.g. "claude-3-sonnet-20240229".
temperature: float
Sampling temperature. Ranges from 0.0 to 1.0.
max_tokens: int
Max number of tokens to generate.
Key init args — client params:
timeout: Optional[float]
Timeout for requests.
max_retries: int
Max number of retries if a request fails.
api_key: Optional[str]
Anthropic API key. If not passed in will be read from env var ANTHROPIC_API_KEY.
base_url: Optional[str]
Base URL for API requests. Only specify if using a proxy or service
emulator.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_anthropic import ChatAnthropic
llm = ChatAnthropic(
model="claude-3-sonnet-20240229",
temperature=0,
max_tokens=1024,
timeout=None,
max_retries=2,
# api_key="...",
# base_url="...",
# other params...
)
**NOTE**: Any param which is not explicitly supported will be passed directly to the
``anthropic.Anthropic.messages.create(...)`` API every time the model is
invoked. For example:
.. code-block:: python
from langchain_anthropic import ChatAnthropic
import anthropic
ChatAnthropic(..., extra_headers={}).invoke(...)
# results in underlying API call of:
anthropic.Anthropic(..).messages.create(..., extra_headers={})
# which is also equivalent to:
ChatAnthropic(...).invoke(..., extra_headers={})
Invoke:
.. code-block:: python
messages = [
("system", "You are a helpful translator. Translate the user sentence to French."),
("human", "I love programming."),
]
llm.invoke(messages)
.. code-block:: python
AIMessage(content="J'aime la programmation.", response_metadata={'id': 'msg_01Trik66aiQ9Z1higrD5XFx3', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 25, 'output_tokens': 11}}, id='run-5886ac5f-3c2e-49f5-8a44-b1e92808c929-0', usage_metadata={'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36})
Stream:
.. code-block:: python
for chunk in llm.stream(messages):
print(chunk)
.. code-block:: python
AIMessageChunk(content='J', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content="'", id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content='a', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content='ime', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content=' la', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content=' programm', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content='ation', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
AIMessageChunk(content='.', id='run-272ff5f9-8485-402c-b90d-eac8babc5b25')
.. code-block:: python
stream = llm.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
.. code-block:: python
AIMessageChunk(content="J'aime la programmation.", id='run-b34faef0-882f-4869-a19c-ed2b856e6361')
Async:
.. code-block:: python
await llm.ainvoke(messages)
# stream:
# async for chunk in llm.astream(messages)
# batch:
# await llm.abatch([messages])
.. code-block:: python
AIMessage(content="J'aime la programmation.", response_metadata={'id': 'msg_01Trik66aiQ9Z1higrD5XFx3', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 25, 'output_tokens': 11}}, id='run-5886ac5f-3c2e-49f5-8a44-b1e92808c929-0', usage_metadata={'input_tokens': 25, 'output_tokens': 11, 'total_tokens': 36})
Tool calling:
.. code-block:: python
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])
ai_msg = llm_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
ai_msg.tool_calls
.. code-block:: python
[{'name': 'GetWeather',
'args': {'location': 'Los Angeles, CA'},
'id': 'toolu_01KzpPEAgzura7hpBqwHbWdo'},
{'name': 'GetWeather',
'args': {'location': 'New York, NY'},
'id': 'toolu_01JtgbVGVJbiSwtZk3Uycezx'},
{'name': 'GetPopulation',
'args': {'location': 'Los Angeles, CA'},
'id': 'toolu_01429aygngesudV9nTbCKGuw'},
{'name': 'GetPopulation',
'args': {'location': 'New York, NY'},
'id': 'toolu_01JPktyd44tVMeBcPPnFSEJG'}]
See ``ChatAnthropic.bind_tools()`` method for more.
Structured output:
.. code-block:: python
from typing import Optional
from pydantic import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
structured_llm = llm.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats")
.. code-block:: python
Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=None)
See ``ChatAnthropic.with_structured_output()`` for more.
Image input:
.. code-block:: python
import base64
import httpx
from langchain_core.messages import HumanMessage
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
message = HumanMessage(
content=[
{"type": "text", "text": "describe the weather in this image"},
{
"type": "image_url",
"image_url": {"url": f""},
},
],
)
ai_msg = llm.invoke([message])
ai_msg.content
.. code-block:: python
| |
156830
|
from typing import Any, Dict, List, Optional # type: ignore[import-not-found]
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, Field
DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
class HuggingFaceEmbeddings(BaseModel, Embeddings):
"""HuggingFace sentence_transformers embedding models.
To use, you should have the ``sentence_transformers`` python package installed.
Example:
.. code-block:: python
from langchain_huggingface import HuggingFaceEmbeddings
model_name = "sentence-transformers/all-mpnet-base-v2"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': False}
hf = HuggingFaceEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
"""
client: Any = None #: :meta private:
model_name: str = DEFAULT_MODEL_NAME
"""Model name to use."""
cache_folder: Optional[str] = None
"""Path to store models.
Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Keyword arguments to pass to the Sentence Transformer model, such as `device`,
`prompts`, `default_prompt_name`, `revision`, `trust_remote_code`, or `token`.
See also the Sentence Transformer documentation: https://sbert.net/docs/package_reference/SentenceTransformer.html#sentence_transformers.SentenceTransformer"""
encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Keyword arguments to pass when calling the `encode` method of the Sentence
Transformer model, such as `prompt_name`, `prompt`, `batch_size`, `precision`,
`normalize_embeddings`, and more.
See also the Sentence Transformer documentation: https://sbert.net/docs/package_reference/SentenceTransformer.html#sentence_transformers.SentenceTransformer.encode"""
multi_process: bool = False
"""Run encode() on multiple GPUs."""
show_progress: bool = False
"""Whether to show a progress bar."""
def __init__(self, **kwargs: Any):
"""Initialize the sentence_transformer."""
super().__init__(**kwargs)
try:
import sentence_transformers # type: ignore[import]
except ImportError as exc:
raise ImportError(
"Could not import sentence_transformers python package. "
"Please install it with `pip install sentence-transformers`."
) from exc
self.client = sentence_transformers.SentenceTransformer(
self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
)
model_config = ConfigDict(
extra="forbid",
protected_namespaces=(),
)
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace transformer model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
import sentence_transformers # type: ignore[import]
texts = list(map(lambda x: x.replace("\n", " "), texts))
if self.multi_process:
pool = self.client.start_multi_process_pool()
embeddings = self.client.encode_multi_process(texts, pool)
sentence_transformers.SentenceTransformer.stop_multi_process_pool(pool)
else:
embeddings = self.client.encode(
texts, show_progress_bar=self.show_progress, **self.encode_kwargs
)
return embeddings.tolist()
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
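# Editorial usage sketch: `encode_kwargs` is forwarded to
# SentenceTransformer.encode (see embed_documents above), so flags such as
# `normalize_embeddings` or `batch_size` can be tuned here. The model name
# and flag values below are illustrative assumptions.
if __name__ == "__main__":
    example = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-mpnet-base-v2",
        model_kwargs={"device": "cpu"},
        encode_kwargs={"normalize_embeddings": True, "batch_size": 16},
    )
    doc_vectors = example.embed_documents(["hello world", "goodbye world"])
    query_vector = example.embed_query("hello")
    print(len(doc_vectors), len(query_vector))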
| |
156836
|
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that package is installed and that the API token is valid."""
try:
from huggingface_hub import login # type: ignore[import]
except ImportError:
raise ImportError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
huggingfacehub_api_token = self.huggingfacehub_api_token or os.getenv(
"HF_TOKEN"
)
if huggingfacehub_api_token is not None:
try:
login(token=huggingfacehub_api_token)
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
from huggingface_hub import AsyncInferenceClient, InferenceClient
# Instantiate clients with supported kwargs
sync_supported_kwargs = set(inspect.signature(InferenceClient).parameters)
self.client = InferenceClient(
model=self.model,
timeout=self.timeout,
token=huggingfacehub_api_token,
**{
key: value
for key, value in self.server_kwargs.items()
if key in sync_supported_kwargs
},
)
async_supported_kwargs = set(inspect.signature(AsyncInferenceClient).parameters)
self.async_client = AsyncInferenceClient(
model=self.model,
timeout=self.timeout,
token=huggingfacehub_api_token,
**{
key: value
for key, value in self.server_kwargs.items()
if key in async_supported_kwargs
},
)
ignored_kwargs = (
set(self.server_kwargs.keys())
- sync_supported_kwargs
- async_supported_kwargs
)
if len(ignored_kwargs) > 0:
logger.warning(
f"Ignoring following parameters as they are not supported by the "
f"InferenceClient or AsyncInferenceClient: {ignored_kwargs}."
)
return self
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling text generation inference API."""
return {
"max_new_tokens": self.max_new_tokens,
"top_k": self.top_k,
"top_p": self.top_p,
"typical_p": self.typical_p,
"temperature": self.temperature,
"repetition_penalty": self.repetition_penalty,
"return_full_text": self.return_full_text,
"truncate": self.truncate,
"stop_sequences": self.stop_sequences,
"seed": self.seed,
"do_sample": self.do_sample,
"watermark": self.watermark,
**self.model_kwargs,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _invocation_params(
self, runtime_stop: Optional[List[str]], **kwargs: Any
) -> Dict[str, Any]:
params = {**self._default_params, **kwargs}
params["stop_sequences"] = params["stop_sequences"] + (runtime_stop or [])
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint."""
invocation_params = self._invocation_params(stop, **kwargs)
if self.streaming:
completion = ""
for chunk in self._stream(prompt, stop, run_manager, **invocation_params):
completion += chunk.text
return completion
else:
invocation_params["stop"] = invocation_params[
"stop_sequences"
] # porting 'stop_sequences' into the 'stop' argument
response = self.client.post(
json={"inputs": prompt, "parameters": invocation_params},
stream=False,
task=self.task,
)
response_text = json.loads(response.decode())[0]["generated_text"]
# Maybe the generation has stopped at one of the stop sequences:
# then we remove this stop sequence from the end of the generated text
for stop_seq in invocation_params["stop_sequences"]:
if response_text[-len(stop_seq) :] == stop_seq:
response_text = response_text[: -len(stop_seq)]
return response_text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
invocation_params = self._invocation_params(stop, **kwargs)
if self.streaming:
completion = ""
async for chunk in self._astream(
prompt, stop, run_manager, **invocation_params
):
completion += chunk.text
return completion
else:
invocation_params["stop"] = invocation_params["stop_sequences"]
response = await self.async_client.post(
json={"inputs": prompt, "parameters": invocation_params},
stream=False,
task=self.task,
)
response_text = json.loads(response.decode())[0]["generated_text"]
# Maybe the generation has stopped at one of the stop sequences:
# then remove this stop sequence from the end of the generated text
for stop_seq in invocation_params["stop_sequences"]:
if response_text[-len(stop_seq) :] == stop_seq:
response_text = response_text[: -len(stop_seq)]
return response_text
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
invocation_params = self._invocation_params(stop, **kwargs)
for response in self.client.text_generation(
prompt, **invocation_params, stream=True
):
# identify stop sequence in generated text, if any
stop_seq_found: Optional[str] = None
for stop_seq in invocation_params["stop_sequences"]:
if stop_seq in response:
stop_seq_found = stop_seq
# identify text to yield
text: Optional[str] = None
if stop_seq_found:
text = response[: response.index(stop_seq_found)]
else:
text = response
# yield text, if any
if text:
chunk = GenerationChunk(text=text)
if run_manager:
run_manager.on_llm_new_token(chunk.text)
yield chunk
# break if stop sequence found
if stop_seq_found:
break
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
invocation_params = self._invocation_params(stop, **kwargs)
async for response in await self.async_client.text_generation(
prompt, **invocation_params, stream=True
):
# identify stop sequence in generated text, if any
stop_seq_found: Optional[str] = None
for stop_seq in invocation_params["stop_sequences"]:
if stop_seq in response:
stop_seq_found = stop_seq
# identify text to yield
text: Optional[str] = None
if stop_seq_found:
text = response[: response.index(stop_seq_found)]
else:
text = response
# yield text, if any
if text:
chunk = GenerationChunk(text=text)
if run_manager:
await run_manager.on_llm_new_token(chunk.text)
yield chunk
# break if stop sequence found
if stop_seq_found:
break
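# Editorial usage sketch, assuming the class above is `HuggingFaceEndpoint`
# (its `_llm_type` returns "huggingface_endpoint"). Streaming goes through
# `_stream` above, which trims any configured stop sequence from the output.
# The repo id and generation parameters are illustrative assumptions; the
# token is read from the HF_TOKEN environment variable by validate_environment.
if __name__ == "__main__":
    from langchain_huggingface import HuggingFaceEndpoint

    llm = HuggingFaceEndpoint(
        repo_id="HuggingFaceH4/zephyr-7b-beta",
        max_new_tokens=64,
        temperature=0.7,
        stop_sequences=["\n\n"],
    )
    for chunk in llm.stream("Write a haiku about open-source models:"):
        print(chunk, end="", flush=True)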
| |
156837
|
from __future__ import annotations # type: ignore[import-not-found]
import importlib.util
import logging
from typing import Any, Iterator, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from pydantic import ConfigDict
DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = (
"text2text-generation",
"text-generation",
"summarization",
"translation",
)
DEFAULT_BATCH_SIZE = 4
logger = logging.getLogger(__name__)
class HuggingFacePipeline(BaseLLM):
"""HuggingFace Pipeline API.
To use, you should have the ``transformers`` python package installed.
Only supports `text-generation`, `text2text-generation`, `summarization` and
`translation` for now.
Example using from_model_id:
.. code-block:: python
from langchain_huggingface import HuggingFacePipeline
hf = HuggingFacePipeline.from_model_id(
model_id="gpt2",
task="text-generation",
pipeline_kwargs={"max_new_tokens": 10},
)
Example passing pipeline in directly:
.. code-block:: python
from langchain_huggingface import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
)
hf = HuggingFacePipeline(pipeline=pipe)
"""
pipeline: Any = None #: :meta private:
model_id: str = DEFAULT_MODEL_ID
"""Model name to use."""
model_kwargs: Optional[dict] = None
"""Keyword arguments passed to the model."""
pipeline_kwargs: Optional[dict] = None
"""Keyword arguments passed to the pipeline."""
batch_size: int = DEFAULT_BATCH_SIZE
"""Batch size to use when passing multiple documents to generate."""
model_config = ConfigDict(
extra="forbid",
)
@classmethod
def from_model_id(
cls,
model_id: str,
task: str,
backend: str = "default",
device: Optional[int] = -1,
device_map: Optional[str] = None,
model_kwargs: Optional[dict] = None,
pipeline_kwargs: Optional[dict] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
**kwargs: Any,
) -> HuggingFacePipeline:
"""Construct the pipeline object from model_id and task."""
try:
from transformers import ( # type: ignore[import]
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoTokenizer,
)
from transformers import pipeline as hf_pipeline # type: ignore[import]
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
_model_kwargs = model_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
try:
if task == "text-generation":
if backend == "openvino":
try:
from optimum.intel.openvino import ( # type: ignore[import]
OVModelForCausalLM,
)
except ImportError:
raise ValueError(
"Could not import optimum-intel python package. "
"Please install it with: "
"pip install 'optimum[openvino,nncf]' "
)
try:
# use local model
model = OVModelForCausalLM.from_pretrained(
model_id, **_model_kwargs
)
except Exception:
# use remote model
model = OVModelForCausalLM.from_pretrained(
model_id, export=True, **_model_kwargs
)
else:
model = AutoModelForCausalLM.from_pretrained(
model_id, **_model_kwargs
)
elif task in ("text2text-generation", "summarization", "translation"):
if backend == "openvino":
try:
from optimum.intel.openvino import OVModelForSeq2SeqLM
except ImportError:
raise ValueError(
"Could not import optimum-intel python package. "
"Please install it with: "
"pip install 'optimum[openvino,nncf]' "
)
try:
# use local model
model = OVModelForSeq2SeqLM.from_pretrained(
model_id, **_model_kwargs
)
except Exception:
# use remote model
model = OVModelForSeq2SeqLM.from_pretrained(
model_id, export=True, **_model_kwargs
)
else:
model = AutoModelForSeq2SeqLM.from_pretrained(
model_id, **_model_kwargs
)
else:
raise ValueError(
f"Got invalid task {task}, "
f"currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ValueError(
f"Could not load the {task} model due to missing dependencies."
) from e
if tokenizer.pad_token is None:
tokenizer.pad_token_id = model.config.eos_token_id
if (
(
getattr(model, "is_loaded_in_4bit", False)
or getattr(model, "is_loaded_in_8bit", False)
)
and device is not None
and backend == "default"
):
logger.warning(
f"Setting the `device` argument to None from {device} to avoid "
"the error caused by attempting to move the model that was already "
"loaded on the GPU using the Accelerate module to the same or "
"another device."
)
device = None
if (
device is not None
and importlib.util.find_spec("torch") is not None
and backend == "default"
):
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device_map is not None and device < 0:
device = None
if device is not None and device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 (default) for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
if device is not None and device_map is not None and backend == "openvino":
logger.warning("Please set device for OpenVINO through: `model_kwargs`")
if "trust_remote_code" in _model_kwargs:
_model_kwargs = {
k: v for k, v in _model_kwargs.items() if k != "trust_remote_code"
}
_pipeline_kwargs = pipeline_kwargs or {}
pipeline = hf_pipeline(
task=task,
model=model,
tokenizer=tokenizer,
device=device,
device_map=device_map,
batch_size=batch_size,
model_kwargs=_model_kwargs,
**_pipeline_kwargs,
)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
return cls(
pipeline=pipeline,
model_id=model_id,
model_kwargs=_model_kwargs,
pipeline_kwargs=_pipeline_kwargs,
batch_size=batch_size,
**kwargs,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_id": self.model_id,
"model_kwargs": self.model_kwargs,
"pipeline_kwargs": self.pipeline_kwargs,
}
@property
def _llm_type(self) -> str:
return "huggingface_pipeline"
| |
156838
|
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
# List to hold all results
text_generations: List[str] = []
pipeline_kwargs = kwargs.get("pipeline_kwargs", {})
skip_prompt = kwargs.get("skip_prompt", False)
for i in range(0, len(prompts), self.batch_size):
batch_prompts = prompts[i : i + self.batch_size]
# Process batch of prompts
responses = self.pipeline(
batch_prompts,
**pipeline_kwargs,
)
# Process each response in the batch
for j, response in enumerate(responses):
if isinstance(response, list):
# if model returns multiple generations, pick the top one
response = response[0]
if self.pipeline.task == "text-generation":
text = response["generated_text"]
elif self.pipeline.task == "text2text-generation":
text = response["generated_text"]
elif self.pipeline.task == "summarization":
text = response["summary_text"]
elif self.pipeline.task == "translation":
text = response["translation_text"]
else:
raise ValueError(
f"Got invalid task {self.pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if skip_prompt:
text = text[len(batch_prompts[j]) :]
# Append the processed text to results
text_generations.append(text)
return LLMResult(
generations=[[Generation(text=text)] for text in text_generations]
)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
from threading import Thread
import torch
from transformers import (
StoppingCriteria,
StoppingCriteriaList,
TextIteratorStreamer,
)
pipeline_kwargs = kwargs.get("pipeline_kwargs", {})
skip_prompt = kwargs.get("skip_prompt", True)
if stop is not None:
stop = self.pipeline.tokenizer.convert_tokens_to_ids(stop)
stopping_ids_list = stop or []
class StopOnTokens(StoppingCriteria):
def __call__(
self,
input_ids: torch.LongTensor,
scores: torch.FloatTensor,
**kwargs: Any,
) -> bool:
for stop_id in stopping_ids_list:
if input_ids[0][-1] == stop_id:
return True
return False
stopping_criteria = StoppingCriteriaList([StopOnTokens()])
inputs = self.pipeline.tokenizer(prompt, return_tensors="pt")
streamer = TextIteratorStreamer(
self.pipeline.tokenizer,
timeout=60.0,
skip_prompt=skip_prompt,
skip_special_tokens=True,
)
generation_kwargs = dict(
inputs,
streamer=streamer,
stopping_criteria=stopping_criteria,
**pipeline_kwargs,
)
t1 = Thread(target=self.pipeline.model.generate, kwargs=generation_kwargs)
t1.start()
for char in streamer:
chunk = GenerationChunk(text=char)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
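# Editorial streaming sketch: `_stream` above runs `model.generate` on a
# background thread and feeds a TextIteratorStreamer, so the public `.stream()`
# API yields text incrementally. The model id and kwargs are illustrative
# assumptions.
if __name__ == "__main__":
    from langchain_huggingface import HuggingFacePipeline

    llm = HuggingFacePipeline.from_model_id(
        model_id="gpt2",
        task="text-generation",
        pipeline_kwargs={"max_new_tokens": 20},
    )
    for piece in llm.stream("Once upon a time"):
        print(piece, end="", flush=True)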
| |
156843
|
# langchain-ollama
This package contains the LangChain integration with Ollama.
## Installation
```bash
pip install -U langchain-ollama
```
You will also need to run the Ollama server locally.
You can download it [here](https://ollama.com/download).
## Chat Models
The `ChatOllama` class exposes chat models from Ollama.
```python
from langchain_ollama import ChatOllama
llm = ChatOllama(model="llama3-groq-tool-use")
llm.invoke("Sing a ballad of LangChain.")
```
## Embeddings
The `OllamaEmbeddings` class exposes embeddings from Ollama.
```python
from langchain_ollama import OllamaEmbeddings
embeddings = OllamaEmbeddings(model="llama3")
embeddings.embed_query("What is the meaning of life?")
```
## LLMs
The `OllamaLLM` class exposes LLMs from Ollama.
```python
from langchain_ollama import OllamaLLM
llm = OllamaLLM(model="llama3")
llm.invoke("The meaning of life is")
```
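Chat responses can also be streamed. A minimal sketch (the model name is only an example and must already be pulled locally, e.g. with `ollama pull llama3`):
```python
from langchain_ollama import ChatOllama

llm = ChatOllama(model="llama3", temperature=0)
for chunk in llm.stream("Sing a ballad of LangChain."):
    print(chunk.content, end="", flush=True)
```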
| |
156874
|
"""Ollama large language models."""
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Literal,
Mapping,
Optional,
Union,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseLLM, LangSmithParams
from langchain_core.outputs import GenerationChunk, LLMResult
from ollama import AsyncClient, Client, Options
from pydantic import PrivateAttr, model_validator
from typing_extensions import Self
class OllamaLLM(BaseLLM):
"""OllamaLLM large language models.
Example:
.. code-block:: python
from langchain_ollama import OllamaLLM
model = OllamaLLM(model="llama3")
model.invoke("Come up with 10 names for a song about parrots")
"""
model: str
"""Model name to use."""
mirostat: Optional[int] = None
"""Enable Mirostat sampling for controlling perplexity.
(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"""
mirostat_eta: Optional[float] = None
"""Influences how quickly the algorithm responds to feedback
from the generated text. A lower learning rate will result in
slower adjustments, while a higher learning rate will make
the algorithm more responsive. (Default: 0.1)"""
mirostat_tau: Optional[float] = None
"""Controls the balance between coherence and diversity
of the output. A lower value will result in more focused and
coherent text. (Default: 5.0)"""
num_ctx: Optional[int] = None
"""Sets the size of the context window used to generate the
next token. (Default: 2048) """
num_gpu: Optional[int] = None
"""The number of GPUs to use. On macOS it defaults to 1 to
enable metal support, 0 to disable."""
num_thread: Optional[int] = None
"""Sets the number of threads to use during computation.
By default, Ollama will detect this for optimal performance.
It is recommended to set this value to the number of physical
CPU cores your system has (as opposed to the logical number of cores)."""
num_predict: Optional[int] = None
"""Maximum number of tokens to predict when generating text.
(Default: 128, -1 = infinite generation, -2 = fill context)"""
repeat_last_n: Optional[int] = None
"""Sets how far back for the model to look back to prevent
repetition. (Default: 64, 0 = disabled, -1 = num_ctx)"""
repeat_penalty: Optional[float] = None
"""Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)
will penalize repetitions more strongly, while a lower value (e.g., 0.9)
will be more lenient. (Default: 1.1)"""
temperature: Optional[float] = None
"""The temperature of the model. Increasing the temperature will
make the model answer more creatively. (Default: 0.8)"""
stop: Optional[List[str]] = None
"""Sets the stop tokens to use."""
tfs_z: Optional[float] = None
"""Tail free sampling is used to reduce the impact of less probable
tokens from the output. A higher value (e.g., 2.0) will reduce the
impact more, while a value of 1.0 disables this setting. (default: 1)"""
top_k: Optional[int] = None
"""Reduces the probability of generating nonsense. A higher value (e.g. 100)
will give more diverse answers, while a lower value (e.g. 10)
will be more conservative. (Default: 40)"""
top_p: Optional[float] = None
"""Works together with top-k. A higher value (e.g., 0.95) will lead
to more diverse text, while a lower value (e.g., 0.5) will
generate more focused and conservative text. (Default: 0.9)"""
format: Literal["", "json"] = ""
"""Specify the format of the output (options: json)"""
keep_alive: Optional[Union[int, str]] = None
"""How long the model will stay loaded into memory."""
base_url: Optional[str] = None
"""Base url the model is hosted under."""
client_kwargs: Optional[dict] = {}
"""Additional kwargs to pass to the httpx Client.
For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
"""
_client: Client = PrivateAttr(default=None)
"""
The client to use for making requests.
"""
_async_client: AsyncClient = PrivateAttr(default=None)
"""
The async client to use for making requests.
"""
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Ollama."""
return {
"model": self.model,
"format": self.format,
"options": {
"mirostat": self.mirostat,
"mirostat_eta": self.mirostat_eta,
"mirostat_tau": self.mirostat_tau,
"num_ctx": self.num_ctx,
"num_gpu": self.num_gpu,
"num_thread": self.num_thread,
"num_predict": self.num_predict,
"repeat_last_n": self.repeat_last_n,
"repeat_penalty": self.repeat_penalty,
"temperature": self.temperature,
"stop": self.stop,
"tfs_z": self.tfs_z,
"top_k": self.top_k,
"top_p": self.top_p,
},
"keep_alive": self.keep_alive,
}
@property
def _llm_type(self) -> str:
"""Return type of LLM."""
return "ollama-llm"
def _get_ls_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> LangSmithParams:
"""Get standard params for tracing."""
params = super()._get_ls_params(stop=stop, **kwargs)
if max_tokens := kwargs.get("num_predict", self.num_predict):
params["ls_max_tokens"] = max_tokens
return params
@model_validator(mode="after")
def _set_clients(self) -> Self:
"""Set clients to use for ollama."""
client_kwargs = self.client_kwargs or {}
self._client = Client(host=self.base_url, **client_kwargs)
self._async_client = AsyncClient(host=self.base_url, **client_kwargs)
return self
async def _acreate_generate_stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> AsyncIterator[Union[Mapping[str, Any], str]]:
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
stop = self.stop
params = self._default_params
for key in self._default_params:
if key in kwargs:
params[key] = kwargs[key]
params["options"]["stop"] = stop
async for part in await self._async_client.generate(
model=params["model"],
prompt=prompt,
stream=True,
options=Options(**params["options"]),
keep_alive=params["keep_alive"],
format=params["format"],
): # type: ignore
yield part
def _create_generate_stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> Iterator[Union[Mapping[str, Any], str]]:
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
stop = self.stop
params = self._default_params
for key in self._default_params:
if key in kwargs:
params[key] = kwargs[key]
params["options"]["stop"] = stop
yield from self._client.generate(
model=params["model"],
prompt=prompt,
stream=True,
options=Options(**params["options"]),
keep_alive=params["keep_alive"],
format=params["format"],
)
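# Editorial usage sketch: constructor fields flow into the "options" mapping
# built by `_default_params`, and per-call kwargs override them inside the
# generate-stream helpers above. Model name and values are illustrative; a
# local Ollama server must be running.
if __name__ == "__main__":
    llm = OllamaLLM(model="llama3", temperature=0.2, num_predict=64)
    for token in llm.stream("The meaning of life is"):
        print(token, end="", flush=True)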
| |
156890
|
# langchain-openai
This package contains the LangChain integrations for OpenAI through their `openai` SDK.
## Installation and Setup
- Install the LangChain partner package
```bash
pip install langchain-openai
```
- Get an OpenAI API key and set it as an environment variable (`OPENAI_API_KEY`)
## LLM
See a [usage example](http://python.langchain.com/docs/integrations/llms/openai).
```python
from langchain_openai import OpenAI
```
If you are using a model hosted on `Azure`, you should use a different wrapper:
```python
from langchain_openai import AzureOpenAI
```
For a more detailed walkthrough of the `Azure` wrapper, see [here](http://python.langchain.com/docs/integrations/llms/azure_openai)
## Chat model
See a [usage example](http://python.langchain.com/docs/integrations/chat/openai).
```python
from langchain_openai import ChatOpenAI
```
If you are using a model hosted on `Azure`, you should use a different wrapper:
```python
from langchain_openai import AzureChatOpenAI
```
For a more detailed walkthrough of the `Azure` wrapper, see [here](http://python.langchain.com/docs/integrations/chat/azure_chat_openai)
## Text Embedding Model
See a [usage example](http://python.langchain.com/docs/integrations/text_embedding/openai)
```python
from langchain_openai import OpenAIEmbeddings
```
If you are using a model hosted on `Azure`, you should use a different wrapper:
```python
from langchain_openai import AzureOpenAIEmbeddings
```
For a more detailed walkthrough of the `Azure` wrapper, see [here](https://python.langchain.com/docs/integrations/text_embedding/azureopenai)
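As a quick end-to-end sketch (the model names below are examples and may change over time):
```python
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
print(llm.invoke("Say hello in French.").content)

embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
vector = embeddings.embed_query("hello world")
print(len(vector))
```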
| |
156960
|
"""Test azure openai embeddings."""
import os
from typing import Any
import numpy as np
import openai
import pytest
from langchain_openai import AzureOpenAIEmbeddings
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY", "")
DEPLOYMENT_NAME = os.environ.get(
"AZURE_OPENAI_DEPLOYMENT_NAME",
os.environ.get("AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME", ""),
)
def _get_embeddings(**kwargs: Any) -> AzureOpenAIEmbeddings:
return AzureOpenAIEmbeddings( # type: ignore[call-arg]
azure_deployment=DEPLOYMENT_NAME,
api_version=OPENAI_API_VERSION,
azure_endpoint=OPENAI_API_BASE,
openai_api_key=OPENAI_API_KEY,
**kwargs,
)
@pytest.mark.scheduled
def test_azure_openai_embedding_documents() -> None:
"""Test openai embeddings."""
documents = ["foo bar"]
embedding = _get_embeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 1536
@pytest.mark.scheduled
def test_azure_openai_embedding_documents_multiple() -> None:
"""Test openai embeddings."""
documents = ["foo bar", "bar foo", "foo"]
embedding = _get_embeddings(chunk_size=2)
embedding.embedding_ctx_length = 8191
output = embedding.embed_documents(documents)
assert embedding.chunk_size == 2
assert len(output) == 3
assert len(output[0]) == 1536
assert len(output[1]) == 1536
assert len(output[2]) == 1536
@pytest.mark.scheduled
def test_azure_openai_embedding_documents_chunk_size() -> None:
"""Test openai embeddings."""
documents = ["foo bar"] * 20
embedding = _get_embeddings()
embedding.embedding_ctx_length = 8191
output = embedding.embed_documents(documents)
# Max 2048 chunks per batch on Azure OpenAI embeddings
assert embedding.chunk_size == 2048
assert len(output) == 20
assert all([len(out) == 1536 for out in output])
@pytest.mark.scheduled
async def test_azure_openai_embedding_documents_async_multiple() -> None:
"""Test openai embeddings."""
documents = ["foo bar", "bar foo", "foo"]
embedding = _get_embeddings(chunk_size=2)
embedding.embedding_ctx_length = 8191
output = await embedding.aembed_documents(documents)
assert len(output) == 3
assert len(output[0]) == 1536
assert len(output[1]) == 1536
assert len(output[2]) == 1536
@pytest.mark.scheduled
def test_azure_openai_embedding_query() -> None:
"""Test openai embeddings."""
document = "foo bar"
embedding = _get_embeddings()
output = embedding.embed_query(document)
assert len(output) == 1536
@pytest.mark.scheduled
async def test_azure_openai_embedding_async_query() -> None:
"""Test openai embeddings."""
document = "foo bar"
embedding = _get_embeddings()
output = await embedding.aembed_query(document)
assert len(output) == 1536
@pytest.mark.scheduled
def test_azure_openai_embedding_with_empty_string() -> None:
"""Test openai embeddings with empty string."""
document = ["", "abc"]
embedding = _get_embeddings()
output = embedding.embed_documents(document)
assert len(output) == 2
assert len(output[0]) == 1536
expected_output = (
openai.AzureOpenAI(
api_version=OPENAI_API_VERSION,
api_key=OPENAI_API_KEY,
azure_endpoint=OPENAI_API_BASE,
azure_deployment=DEPLOYMENT_NAME,
) # type: ignore
.embeddings.create(input="", model="text-embedding-ada-002")
.data[0]
.embedding
)
assert np.allclose(output[0], expected_output, atol=0.0001)
assert len(output[1]) == 1536
@pytest.mark.scheduled
def test_embed_documents_normalized() -> None:
output = _get_embeddings().embed_documents(["foo walked to the market"])
assert np.isclose(np.linalg.norm(output[0]), 1.0)
@pytest.mark.scheduled
def test_embed_query_normalized() -> None:
output = _get_embeddings().embed_query("foo walked to the market")
assert np.isclose(np.linalg.norm(output), 1.0)
| |
156961
|
"""Test OpenAI embeddings."""
import numpy as np
import openai
import pytest
from langchain_openai.embeddings.base import OpenAIEmbeddings
def test_langchain_openai_embedding_documents() -> None:
"""Test openai embeddings."""
documents = ["foo bar"]
embedding = OpenAIEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) > 0
def test_langchain_openai_embedding_query() -> None:
"""Test openai embeddings."""
document = "foo bar"
embedding = OpenAIEmbeddings()
output = embedding.embed_query(document)
assert len(output) > 0
def test_langchain_openai_embeddings_dimensions() -> None:
"""Test openai embeddings."""
documents = ["foo bar"]
embedding = OpenAIEmbeddings(model="text-embedding-3-small", dimensions=128)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 128
@pytest.mark.skip(reason="flaky")
def test_langchain_openai_embeddings_equivalent_to_raw() -> None:
documents = ["disallowed special token '<|endoftext|>'"]
embedding = OpenAIEmbeddings()
lc_output = embedding.embed_documents(documents)[0]
direct_output = (
openai.OpenAI()
.embeddings.create(input=documents, model=embedding.model)
.data[0]
.embedding
)
assert np.isclose(lc_output, direct_output).all()
@pytest.mark.skip(reason="flaky")
async def test_langchain_openai_embeddings_equivalent_to_raw_async() -> None:
documents = ["disallowed special token '<|endoftext|>'"]
embedding = OpenAIEmbeddings()
lc_output = (await embedding.aembed_documents(documents))[0]
client = openai.AsyncOpenAI()
direct_output = (
(await client.embeddings.create(input=documents, model=embedding.model))
.data[0]
.embedding
)
assert np.isclose(lc_output, direct_output).all()
def test_langchain_openai_embeddings_dimensions_large_num() -> None:
"""Test openai embeddings."""
documents = [f"foo bar {i}" for i in range(2000)]
embedding = OpenAIEmbeddings(model="text-embedding-3-small", dimensions=128)
output = embedding.embed_documents(documents)
assert len(output) == 2000
assert len(output[0]) == 128
| |
156974
|
"""Azure OpenAI embeddings wrapper."""
from __future__ import annotations
from typing import Callable, Optional, Union
import openai
from langchain_core.utils import from_env, secret_from_env
from pydantic import Field, SecretStr, model_validator
from typing_extensions import Self, cast
from langchain_openai.embeddings.base import OpenAIEmbeddings
class AzureOpenAIEmbeddings(OpenAIEmbeddings):
"""AzureOpenAI embedding model integration.
Setup:
To access AzureOpenAI embedding models you'll need to create an Azure account,
get an API key, and install the `langchain-openai` integration package.
You’ll need to have an Azure OpenAI instance deployed.
You can deploy a version on Azure Portal following this
[guide](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal).
Once you have your instance running, make sure you have the name of your
instance and key. You can find the key in the Azure Portal,
under the “Keys and Endpoint” section of your instance.
.. code-block:: bash
pip install -U langchain_openai
# Set up your environment variables (or pass them directly to the model)
export AZURE_OPENAI_API_KEY="your-api-key"
export AZURE_OPENAI_ENDPOINT="https://<your-endpoint>.openai.azure.com/"
export OPENAI_API_VERSION="2024-02-01"
Key init args — completion params:
model: str
Name of AzureOpenAI model to use.
dimensions: Optional[int]
Number of dimensions for the embeddings. Can be specified only
if the underlying model supports it.
Key init args — client params:
api_key: Optional[SecretStr]
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_openai import AzureOpenAIEmbeddings
embeddings = AzureOpenAIEmbeddings(
model="text-embedding-3-large"
# dimensions: Optional[int] = None, # Can specify dimensions with new text-embedding-3 models
# azure_endpoint="https://<your-endpoint>.openai.azure.com/", # If not provided, will read env variable AZURE_OPENAI_ENDPOINT
# api_key=..., # Can provide an API key directly. If missing, will read env variable AZURE_OPENAI_API_KEY
# openai_api_version=..., # If not provided, will read env variable OPENAI_API_VERSION
)
Embed single text:
.. code-block:: python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
Embed multiple texts:
.. code-block:: python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
.. code-block:: python
2
[-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
Async:
.. code-block:: python
vector = await embed.aembed_query(input_text)
print(vector[:3])
# multiple:
# await embed.aembed_documents(input_texts)
.. code-block:: python
[-0.009100092574954033, 0.005071679595857859, -0.0029193938244134188]
""" # noqa: E501
azure_endpoint: Optional[str] = Field(
default_factory=from_env("AZURE_OPENAI_ENDPOINT", default=None)
)
"""Your Azure endpoint, including the resource.
Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
Example: `https://example-resource.openai.azure.com/`
"""
deployment: Optional[str] = Field(default=None, alias="azure_deployment")
"""A model deployment.
If given sets the base client URL to include `/deployments/{azure_deployment}`.
Note: this means you won't be able to use non-deployment endpoints.
"""
# Check OPENAI_KEY for backwards compatibility.
# TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
# other forms of azure credentials.
openai_api_key: Optional[SecretStr] = Field(
alias="api_key",
default_factory=secret_from_env(
["AZURE_OPENAI_API_KEY", "OPENAI_API_KEY"], default=None
),
)
"""Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided."""
openai_api_version: Optional[str] = Field(
default_factory=from_env("OPENAI_API_VERSION", default="2023-05-15"),
alias="api_version",
)
"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided.
Set to "2023-05-15" by default if env variable `OPENAI_API_VERSION` is not set.
"""
azure_ad_token: Optional[SecretStr] = Field(
default_factory=secret_from_env("AZURE_OPENAI_AD_TOKEN", default=None)
)
"""Your Azure Active Directory token.
Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
For more:
https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
"""
azure_ad_token_provider: Union[Callable[[], str], None] = None
"""A function that returns an Azure Active Directory token.
Will be invoked on every request.
"""
openai_api_type: Optional[str] = Field(
default_factory=from_env("OPENAI_API_TYPE", default="azure")
)
validate_base_url: bool = True
chunk_size: int = 2048
"""Maximum number of texts to embed in each batch"""
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
# For backwards compatibility. Before openai v1, no distinction was made
# between azure_endpoint and base_url (openai_api_base).
openai_api_base = self.openai_api_base
if openai_api_base and self.validate_base_url:
if "/openai" not in openai_api_base:
self.openai_api_base = cast(str, self.openai_api_base) + "/openai"
raise ValueError(
"As of openai>=1.0.0, Azure endpoints should be specified via "
"the `azure_endpoint` param not `openai_api_base` "
"(or alias `base_url`). "
)
if self.deployment:
raise ValueError(
"As of openai>=1.0.0, if `deployment` (or alias "
"`azure_deployment`) is specified then "
"`openai_api_base` (or alias `base_url`) should not be. "
"Instead use `deployment` (or alias `azure_deployment`) "
"and `azure_endpoint`."
)
client_params: dict = {
"api_version": self.openai_api_version,
"azure_endpoint": self.azure_endpoint,
"azure_deployment": self.deployment,
"api_key": (
self.openai_api_key.get_secret_value() if self.openai_api_key else None
),
"azure_ad_token": (
self.azure_ad_token.get_secret_value() if self.azure_ad_token else None
),
"azure_ad_token_provider": self.azure_ad_token_provider,
"organization": self.openai_organization,
"base_url": self.openai_api_base,
"timeout": self.request_timeout,
"max_retries": self.max_retries,
"default_headers": self.default_headers,
"default_query": self.default_query,
}
if not self.client:
sync_specific: dict = {"http_client": self.http_client}
self.client = openai.AzureOpenAI(
**client_params, # type: ignore[arg-type]
**sync_specific,
).embeddings
if not self.async_client:
async_specific: dict = {"http_client": self.http_async_client}
self.async_client = openai.AsyncAzureOpenAI(
**client_params, # type: ignore[arg-type]
**async_specific,
).embeddings
return self
@property
def _llm_type(self) -> str:
return "azure-openai-chat"
| |
156977
|
"""OpenAI embedding model integration.
Setup:
Install ``langchain_openai`` and set environment variable ``OPENAI_API_KEY``.
.. code-block:: bash
pip install -U langchain_openai
export OPENAI_API_KEY="your-api-key"
Key init args — embedding params:
model: str
Name of OpenAI model to use.
dimensions: Optional[int] = None
The number of dimensions the resulting output embeddings should have.
Only supported in `text-embedding-3` and later models.
Key init args — client params:
api_key: Optional[SecretStr] = None
OpenAI API key.
organization: Optional[str] = None
OpenAI organization ID. If not passed in will be read
from env var OPENAI_ORG_ID.
max_retries: int = 2
Maximum number of retries to make when generating.
request_timeout: Optional[Union[float, Tuple[float, float], Any]] = None
Timeout for requests to OpenAI completion API
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_openai import OpenAIEmbeddings
embed = OpenAIEmbeddings(
model="text-embedding-3-large"
# With the `text-embedding-3` class
# of models, you can specify the size
# of the embeddings you want returned.
# dimensions=1024
)
Embed single text:
.. code-block:: python
input_text = "The meaning of life is 42"
vector = embeddings.embed_query("hello")
print(vector[:3])
.. code-block:: python
[-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
Embed multiple texts:
.. code-block:: python
vectors = embed.embed_documents(["hello", "goodbye"])
# Showing only the first 3 coordinates
print(len(vectors))
print(vectors[0][:3])
.. code-block:: python
2
[-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
Async:
.. code-block:: python
vector = await embed.aembed_query(input_text)
print(vector[:3])
# multiple:
# await embed.aembed_documents(input_texts)
.. code-block:: python
[-0.009100092574954033, 0.005071679595857859, -0.0029193938244134188]
"""
client: Any = Field(default=None, exclude=True) #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
model: str = "text-embedding-ada-002"
dimensions: Optional[int] = None
"""The number of dimensions the resulting output embeddings should have.
Only supported in `text-embedding-3` and later models.
"""
# to support Azure OpenAI Service custom deployment names
deployment: Optional[str] = model
# TODO: Move to AzureOpenAIEmbeddings.
openai_api_version: Optional[str] = Field(
default_factory=from_env("OPENAI_API_VERSION", default=None),
alias="api_version",
)
"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
# to support Azure OpenAI Service custom endpoints
openai_api_base: Optional[str] = Field(
alias="base_url", default_factory=from_env("OPENAI_API_BASE", default=None)
)
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
# to support Azure OpenAI Service custom endpoints
openai_api_type: Optional[str] = Field(
default_factory=from_env("OPENAI_API_TYPE", default=None)
)
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = Field(
default_factory=from_env("OPENAI_PROXY", default=None)
)
embedding_ctx_length: int = 8191
"""The maximum number of tokens to embed at once."""
openai_api_key: Optional[SecretStr] = Field(
alias="api_key", default_factory=secret_from_env("OPENAI_API_KEY", default=None)
)
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_organization: Optional[str] = Field(
alias="organization",
default_factory=from_env(
["OPENAI_ORG_ID", "OPENAI_ORGANIZATION"], default=None
),
)
"""Automatically inferred from env var `OPENAI_ORG_ID` if not provided."""
allowed_special: Union[Literal["all"], Set[str], None] = None
disallowed_special: Union[Literal["all"], Set[str], Sequence[str], None] = None
chunk_size: int = 1000
"""Maximum number of texts to embed in each batch"""
max_retries: int = 2
"""Maximum number of retries to make when generating."""
request_timeout: Optional[Union[float, Tuple[float, float], Any]] = Field(
default=None, alias="timeout"
)
"""Timeout for requests to OpenAI completion API. Can be float, httpx.Timeout or
None."""
headers: Any = None
tiktoken_enabled: bool = True
"""Set this to False for non-OpenAI implementations of the embeddings API, e.g.
the `--extensions openai` extension for `text-generation-webui`"""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them to be under a certain limit. By default, when set to None, this will
be the same as the embedding model name. However, there are some cases
where you may want to use this Embedding class with a model name not
supported by tiktoken. This can include when using Azure embeddings or
when using one of the many model providers that expose an OpenAI-like
API but with different models. In those cases, in order to avoid erroring
when tiktoken is called, you can specify a model name to use here."""
show_progress_bar: bool = False
"""Whether to show a progress bar when embedding."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
skip_empty: bool = False
"""Whether to skip empty strings when embedding or raise an error.
Defaults to not skipping."""
default_headers: Union[Mapping[str, str], None] = None
default_query: Union[Mapping[str, object], None] = None
# Configure a custom httpx client. See the
# [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
retry_min_seconds: int = 4
"""Min number of seconds to wait between retries"""
retry_max_seconds: int = 20
"""Max number of seconds to wait between retries"""
http_client: Union[Any, None] = None
"""Optional httpx.Client. Only used for sync invocations. Must specify
http_async_client as well if you'd like a custom client for async invocations.
"""
http_async_client: Union[Any, None] = None
"""Optional httpx.AsyncClient. Only used for async invocations. Must specify
http_client as well if you'd like a custom client for sync invocations."""
check_embedding_ctx_length: bool = True
"""Whether to check the token length of inputs and automatically split inputs
longer than embedding_ctx_length."""
model_config = ConfigDict(
extra="forbid", populate_by_name=True, protected_namespaces=()
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
warnings.warn(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
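# Editorial behavior sketch for `build_extra` above (assuming this module
# defines OpenAIEmbeddings): an unrecognized constructor kwarg is moved into
# `model_kwargs` with a warning, while passing a declared field inside
# `model_kwargs` raises a ValueError. The key and values are illustrative.
if __name__ == "__main__":
    emb = OpenAIEmbeddings(api_key="sk-placeholder", user="example-app")
    print(emb.model_kwargs)  # {'user': 'example-app'}, after a warning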
| |
156981
|
\\n "random_ints": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67]\\n}'
Image input:
.. code-block:: python
import base64
import httpx
from langchain_core.messages import HumanMessage
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
message = HumanMessage(
content=[
{"type": "text", "text": "describe the weather in this image"},
{
"type": "image_url",
"image_url": {"url": f""},
},
]
)
ai_msg = llm.invoke([message])
ai_msg.content
.. code-block:: python
"The weather in the image appears to be quite pleasant. The sky is mostly clear"
Token usage:
.. code-block:: python
ai_msg = llm.invoke(messages)
ai_msg.usage_metadata
.. code-block:: python
{"input_tokens": 28, "output_tokens": 5, "total_tokens": 33}
Logprobs:
.. code-block:: python
logprobs_llm = llm.bind(logprobs=True)
ai_msg = logprobs_llm.invoke(messages)
ai_msg.response_metadata["logprobs"]
.. code-block:: python
{
"content": [
{
"token": "J",
"bytes": [74],
"logprob": -4.9617593e-06,
"top_logprobs": [],
},
{
"token": "'adore",
"bytes": [39, 97, 100, 111, 114, 101],
"logprob": -0.25202933,
"top_logprobs": [],
},
{
"token": " la",
"bytes": [32, 108, 97],
"logprob": -0.20141791,
"top_logprobs": [],
},
{
"token": " programmation",
"bytes": [
32,
112,
114,
111,
103,
114,
97,
109,
109,
97,
116,
105,
111,
110,
],
"logprob": -1.9361265e-07,
"top_logprobs": [],
},
{
"token": ".",
"bytes": [46],
"logprob": -1.2233183e-05,
"top_logprobs": [],
},
]
}
Response metadata
.. code-block:: python
ai_msg = llm.invoke(messages)
ai_msg.response_metadata
.. code-block:: python
{
"token_usage": {
"completion_tokens": 6,
"prompt_tokens": 28,
"total_tokens": 34,
},
"model_name": "gpt-35-turbo",
"system_fingerprint": None,
"prompt_filter_results": [
{
"prompt_index": 0,
"content_filter_results": {
"hate": {"filtered": False, "severity": "safe"},
"self_harm": {"filtered": False, "severity": "safe"},
"sexual": {"filtered": False, "severity": "safe"},
"violence": {"filtered": False, "severity": "safe"},
},
}
],
"finish_reason": "stop",
"logprobs": None,
"content_filter_results": {
"hate": {"filtered": False, "severity": "safe"},
"self_harm": {"filtered": False, "severity": "safe"},
"sexual": {"filtered": False, "severity": "safe"},
"violence": {"filtered": False, "severity": "safe"},
},
}
""" # noqa: E501
azure_endpoint: Optional[str] = Field(
default_factory=from_env("AZURE_OPENAI_ENDPOINT", default=None)
)
"""Your Azure endpoint, including the resource.
Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
Example: `https://example-resource.azure.openai.com/`
"""
deployment_name: Union[str, None] = Field(default=None, alias="azure_deployment")
"""A model deployment.
If given, sets the base client URL to include `/deployments/{azure_deployment}`.
Note: this means you won't be able to use non-deployment endpoints.
"""
openai_api_version: Optional[str] = Field(
alias="api_version",
default_factory=from_env("OPENAI_API_VERSION", default=None),
)
"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
# Check OPENAI_API_KEY for backwards compatibility.
# TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
# other forms of azure credentials.
openai_api_key: Optional[SecretStr] = Field(
alias="api_key",
default_factory=secret_from_env(
["AZURE_OPENAI_API_KEY", "OPENAI_API_KEY"], default=None
),
)
"""Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided."""
azure_ad_token: Optional[SecretStr] = Field(
default_factory=secret_from_env("AZURE_OPENAI_AD_TOKEN", default=None)
)
"""Your Azure Active Directory token.
Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
For more:
https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
"""
azure_ad_token_provider: Union[Callable[[], str], None] = None
"""A function that returns an Azure Active Directory token.
Will be invoked on every request.
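For example, a minimal sketch assuming the optional ``azure-identity`` package is
installed:
.. code-block:: python
    from azure.identity import DefaultAzureCredential, get_bearer_token_provider
    token_provider = get_bearer_token_provider(
        DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    )
    # Then pass azure_ad_token_provider=token_provider when constructing the model.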
"""
model_version: str = ""
"""The version of the model (e.g. "0125" for gpt-3.5-0125).
Azure OpenAI doesn't return model version with the response by default so it must
be manually specified if you want to use this information downstream, e.g. when
calculating costs.
When you specify the version, it will be appended to the model name in the
response. Setting the correct version will help you calculate the cost properly.
The model version is not validated, so make sure you set it correctly to get the
correct cost.
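For example, a minimal sketch (the deployment name below is hypothetical):
.. code-block:: python
    llm = AzureChatOpenAI(
        azure_deployment="my-gpt-35-deployment",  # hypothetical deployment name
        model_version="0125",  # reported model name becomes e.g. "gpt-35-turbo-0125"
    )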
"""
openai_api_type: Optional[str] = Field(
default_factory=from_env("OPENAI_API_TYPE", default="azure")
)
"""Legacy, for openai<1.0.0 support."""
validate_base_url: bool = True
"""If legacy arg openai_api_base is passed in, try to infer if it is a base_url or
azure_endpoint and update client params accordingly.
"""
model_name: Optional[str] = Field(default=None, alias="model") # type: ignore[assignment]
"""Name of the deployed OpenAI model, e.g. "gpt-4o", "gpt-35-turbo", etc.
Distinct from the Azure deployment name, which is set by the Azure user.
Used for tracing and token counting. Does NOT affect completion.
"""
disabled_params: Optional[Dict[str, Any]] = Field(default=None)
"""Parameters of the OpenAI client or chat.completions endpoint that should be
disabled for the given model.
Should be specified as ``{"param": None | ['val1', 'val2']}`` where the key is the
parameter and the value is either None, meaning that parameter should never be
used, or a list of disabled values for the parameter.
For example, older models may not support the 'parallel_tool_calls' parameter at
all, in which case ``disabled_params={"parallel_tool_calls": None}`` can be passed
in.
If a parameter is disabled then it will not be used by default in any methods, e.g.
in
:meth:`~langchain_openai.chat_models.azure.AzureChatOpenAI.with_structured_output`.
However, this does not prevent a user from directly passing in the parameter during
invocation.
By default, unless ``model_name="gpt-4o"`` is specified, 'parallel_tool_calls' will
be disabled.
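For example, to disable parallel tool calls explicitly (a minimal sketch; the
deployment name below is hypothetical):
.. code-block:: python
    llm = AzureChatOpenAI(
        azure_deployment="my-gpt-35-deployment",  # hypothetical deployment name
        disabled_params={"parallel_tool_calls": None},
    )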
"""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "chat_models", "azure_openai"]
| |
156991
|
\\n\\n"
"What's heavier a pound of bricks or a pound of feathers?"
)
# -> {
# 'raw': AIMessage(content='{\\n "answer": "They are both the same weight.",\\n "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \\n}'),
# 'parsed': AnswerWithJustification(answer='They are both the same weight.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'),
# 'parsing_error': None
# }
.. dropdown:: Example: schema=None, method="json_mode", include_raw=True
.. code-block::
structured_llm = llm.with_structured_output(method="json_mode", include_raw=True)
structured_llm.invoke(
"Answer the following question. "
"Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n"
"What's heavier a pound of bricks or a pound of feathers?"
)
# -> {
# 'raw': AIMessage(content='{\\n "answer": "They are both the same weight.",\\n "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \\n}'),
# 'parsed': {
# 'answer': 'They are both the same weight.',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'
# },
# 'parsing_error': None
# }
""" # noqa: E501
if kwargs:
raise ValueError(f"Received unsupported arguments {kwargs}")
if strict is not None and method == "json_mode":
raise ValueError(
"Argument `strict` is not supported with `method`='json_mode'"
)
is_pydantic_schema = _is_pydantic_class(schema)
if method == "function_calling":
if schema is None:
raise ValueError(
"schema must be specified when method is not 'json_mode'. "
"Received None."
)
tool_name = convert_to_openai_tool(schema)["function"]["name"]
bind_kwargs = self._filter_disabled_params(
tool_choice=tool_name, parallel_tool_calls=False, strict=strict
)
llm = self.bind_tools([schema], **bind_kwargs)
if is_pydantic_schema:
output_parser: Runnable = PydanticToolsParser(
tools=[schema], # type: ignore[list-item]
first_tool_only=True, # type: ignore[list-item]
)
else:
output_parser = JsonOutputKeyToolsParser(
key_name=tool_name, first_tool_only=True
)
elif method == "json_mode":
llm = self.bind(response_format={"type": "json_object"})
output_parser = (
PydanticOutputParser(pydantic_object=schema) # type: ignore[arg-type]
if is_pydantic_schema
else JsonOutputParser()
)
elif method == "json_schema":
if schema is None:
raise ValueError(
"schema must be specified when method is not 'json_mode'. "
"Received None."
)
strict = strict if strict is not None else True
response_format = _convert_to_openai_response_format(schema, strict=strict)
llm = self.bind(response_format=response_format)
if is_pydantic_schema:
output_parser = _oai_structured_outputs_parser.with_types(
output_type=cast(type, schema)
)
else:
output_parser = JsonOutputParser()
else:
raise ValueError(
f"Unrecognized method argument. Expected one of 'function_calling' or "
f"'json_mode'. Received: '{method}'"
)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
else:
return llm | output_parser
def _filter_disabled_params(self, **kwargs: Any) -> Dict[str, Any]:
if not self.disabled_params:
return kwargs
filtered = {}
for k, v in kwargs.items():
# Skip param
if k in self.disabled_params and (
self.disabled_params[k] is None or v in self.disabled_params[k]
):
continue
# Keep param
else:
filtered[k] = v
return filtered
class ChatOpenAI(BaseChatOpenAI):
"""
| |
156996
|
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Mapping, Optional, Union
import openai
from langchain_core.language_models import LangSmithParams
from langchain_core.utils import from_env, secret_from_env
from pydantic import Field, SecretStr, model_validator
from typing_extensions import Self, cast
from langchain_openai.llms.base import BaseOpenAI
logger = logging.getLogger(__name__)
class AzureOpenAI(BaseOpenAI):
"""Azure-specific OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_openai import AzureOpenAI
llm = AzureOpenAI(model_name="gpt-3.5-turbo-instruct")
"""
azure_endpoint: Optional[str] = Field(
default_factory=from_env("AZURE_OPENAI_ENDPOINT", default=None)
)
"""Your Azure endpoint, including the resource.
Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
Example: `https://example-resource.azure.openai.com/`
"""
deployment_name: Union[str, None] = Field(default=None, alias="azure_deployment")
"""A model deployment.
If given, sets the base client URL to include `/deployments/{azure_deployment}`.
Note: this means you won't be able to use non-deployment endpoints.
"""
openai_api_version: Optional[str] = Field(
alias="api_version",
default_factory=from_env("OPENAI_API_VERSION", default=None),
)
"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
# Check OPENAI_API_KEY for backwards compatibility.
# TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
# other forms of azure credentials.
openai_api_key: Optional[SecretStr] = Field(
alias="api_key",
default_factory=secret_from_env(
["AZURE_OPENAI_API_KEY", "OPENAI_API_KEY"], default=None
),
)
azure_ad_token: Optional[SecretStr] = Field(
default_factory=secret_from_env("AZURE_OPENAI_AD_TOKEN", default=None)
)
"""Your Azure Active Directory token.
Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
For more:
https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
"""
azure_ad_token_provider: Union[Callable[[], str], None] = None
"""A function that returns an Azure Active Directory token.
Will be invoked on every request.
"""
openai_api_type: Optional[str] = Field(
default_factory=from_env("OPENAI_API_TYPE", default="azure")
)
"""Legacy, for openai<1.0.0 support."""
validate_base_url: bool = True
"""For backwards compatibility. If legacy val openai_api_base is passed in, try to
infer if it is a base_url or azure_endpoint and update accordingly.
"""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "llms", "openai"]
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"openai_api_key": "AZURE_OPENAI_API_KEY",
"azure_ad_token": "AZURE_OPENAI_AD_TOKEN",
}
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
if self.n < 1:
raise ValueError("n must be at least 1.")
if self.streaming and self.n > 1:
raise ValueError("Cannot stream results when n > 1.")
if self.streaming and self.best_of > 1:
raise ValueError("Cannot stream results when best_of > 1.")
# For backwards compatibility. Before openai v1, no distinction was made
# between azure_endpoint and base_url (openai_api_base).
openai_api_base = self.openai_api_base
if openai_api_base and self.validate_base_url:
if "/openai" not in openai_api_base:
self.openai_api_base = (
cast(str, self.openai_api_base).rstrip("/") + "/openai"
)
raise ValueError(
"As of openai>=1.0.0, Azure endpoints should be specified via "
"the `azure_endpoint` param not `openai_api_base` "
"(or alias `base_url`)."
)
if self.deployment_name:
raise ValueError(
"As of openai>=1.0.0, if `deployment_name` (or alias "
"`azure_deployment`) is specified then "
"`openai_api_base` (or alias `base_url`) should not be. "
"Instead use `deployment_name` (or alias `azure_deployment`) "
"and `azure_endpoint`."
)
self.deployment_name = None
client_params: dict = {
"api_version": self.openai_api_version,
"azure_endpoint": self.azure_endpoint,
"azure_deployment": self.deployment_name,
"api_key": self.openai_api_key.get_secret_value()
if self.openai_api_key
else None,
"azure_ad_token": self.azure_ad_token.get_secret_value()
if self.azure_ad_token
else None,
"azure_ad_token_provider": self.azure_ad_token_provider,
"organization": self.openai_organization,
"base_url": self.openai_api_base,
"timeout": self.request_timeout,
"max_retries": self.max_retries,
"default_headers": self.default_headers,
"default_query": self.default_query,
}
if not self.client:
sync_specific = {"http_client": self.http_client}
self.client = openai.AzureOpenAI(
**client_params,
**sync_specific, # type: ignore[arg-type]
).completions
if not self.async_client:
async_specific = {"http_client": self.http_async_client}
self.async_client = openai.AsyncAzureOpenAI(
**client_params,
**async_specific, # type: ignore[arg-type]
).completions
return self
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {
**{"deployment_name": self.deployment_name},
**super()._identifying_params,
}
@property
def _invocation_params(self) -> Dict[str, Any]:
openai_params = {"model": self.deployment_name}
return {**openai_params, **super()._invocation_params}
def _get_ls_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> LangSmithParams:
"""Get standard params for tracing."""
params = super()._get_ls_params(stop=stop, **kwargs)
invocation_params = self._invocation_params
params["ls_provider"] = "azure"
if model_name := invocation_params.get("model"):
params["ls_model_name"] = model_name
return params
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azure"
@property
def lc_attributes(self) -> Dict[str, Any]:
return {
"openai_api_type": self.openai_api_type,
"openai_api_version": self.openai_api_version,
}
| |
156999
|
class BaseOpenAI(BaseLLM):
"""Base OpenAI large language model class."""
client: Any = Field(default=None, exclude=True) #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
model_name: str = Field(default="gpt-3.5-turbo-instruct", alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the model's maximal context size.
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
best_of: int = 1
"""Generates best_of completions server-side and returns the "best"."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[SecretStr] = Field(
alias="api_key", default_factory=secret_from_env("OPENAI_API_KEY", default=None)
)
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_api_base: Optional[str] = Field(
alias="base_url", default_factory=from_env("OPENAI_API_BASE", default=None)
)
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
openai_organization: Optional[str] = Field(
alias="organization",
default_factory=from_env(
["OPENAI_ORG_ID", "OPENAI_ORGANIZATION"], default=None
),
)
"""Automatically inferred from env var `OPENAI_ORG_ID` if not provided."""
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = Field(
default_factory=from_env("OPENAI_PROXY", default=None)
)
batch_size: int = 20
"""Batch size to use when passing multiple documents to generate."""
request_timeout: Union[float, Tuple[float, float], Any, None] = Field(
default=None, alias="timeout"
)
"""Timeout for requests to OpenAI completion API. Can be float, httpx.Timeout or
None."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
max_retries: int = 2
"""Maximum number of retries to make when generating."""
seed: Optional[int] = None
"""Seed for generation"""
logprobs: Optional[int] = None
"""Include the log probabilities on the logprobs most likely output tokens,
as well the chosen tokens."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
"""Set of special tokens that are allowed。"""
disallowed_special: Union[Literal["all"], Collection[str]] = "all"
"""Set of special tokens that are not allowed。"""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them to be under a certain limit. By default, when set to None, this will
be the same as the model name. However, there are some cases
where you may want to use this class with a model name not
supported by tiktoken. This can include when using Azure OpenAI or
when using one of the many model providers that expose an OpenAI-like
API but with different models. In those cases, in order to avoid erroring
when tiktoken is called, you can specify a model name to use here."""
default_headers: Union[Mapping[str, str], None] = None
default_query: Union[Mapping[str, object], None] = None
# Configure a custom httpx client. See the
# [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
http_client: Union[Any, None] = None
"""Optional httpx.Client. Only used for sync invocations. Must specify
http_async_client as well if you'd like a custom client for async invocations.
"""
http_async_client: Union[Any, None] = None
"""Optional httpx.AsyncClient. Only used for async invocations. Must specify
http_client as well if you'd like a custom client for sync invocations."""
extra_body: Optional[Mapping[str, Any]] = None
"""Optional additional JSON properties to include in the request parameters when
making requests to OpenAI compatible APIs, such as vLLM."""
model_config = ConfigDict(populate_by_name=True)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
values = _build_model_kwargs(values, all_required_field_names)
return values
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
if self.n < 1:
raise ValueError("n must be at least 1.")
if self.streaming and self.n > 1:
raise ValueError("Cannot stream results when n > 1.")
if self.streaming and self.best_of > 1:
raise ValueError("Cannot stream results when best_of > 1.")
client_params: dict = {
"api_key": (
self.openai_api_key.get_secret_value() if self.openai_api_key else None
),
"organization": self.openai_organization,
"base_url": self.openai_api_base,
"timeout": self.request_timeout,
"max_retries": self.max_retries,
"default_headers": self.default_headers,
"default_query": self.default_query,
}
if not self.client:
sync_specific = {"http_client": self.http_client}
self.client = openai.OpenAI(**client_params, **sync_specific).completions # type: ignore[arg-type]
if not self.async_client:
async_specific = {"http_client": self.http_async_client}
self.async_client = openai.AsyncOpenAI(
**client_params,
**async_specific, # type: ignore[arg-type]
).completions
return self
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
normal_params: Dict[str, Any] = {
"temperature": self.temperature,
"top_p": self.top_p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
"seed": self.seed,
"logprobs": self.logprobs,
}
if self.max_tokens is not None:
normal_params["max_tokens"] = self.max_tokens
if self.extra_body is not None:
normal_params["extra_body"] = self.extra_body
# Azure gpt-35-turbo doesn't support best_of
# don't specify best_of if it is 1
if self.best_of > 1:
normal_params["best_of"] = self.best_of
return {**normal_params, **self.model_kwargs}
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
self.get_sub_prompts(params, [prompt], stop) # this mutates params
for stream_resp in self.client.create(prompt=prompt, **params):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.model_dump()
chunk = _stream_response_to_generation_chunk(stream_resp)
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
logprobs=(
chunk.generation_info["logprobs"]
if chunk.generation_info
else None
),
)
yield chunk
| |
157006
|
# langchain-couchbase
This package contains the LangChain integration with Couchbase
## Installation
```bash
pip install -U langchain-couchbase
```
## Usage
The `CouchbaseVectorStore` class exposes the connection to the Couchbase vector store.
```python
from langchain_couchbase.vectorstores import CouchbaseVectorStore
from couchbase.cluster import Cluster
from couchbase.auth import PasswordAuthenticator
from couchbase.options import ClusterOptions
from datetime import timedelta
auth = PasswordAuthenticator(username, password)
options = ClusterOptions(auth)
connect_string = "couchbases://localhost"
cluster = Cluster(connect_string, options)
# Wait until the cluster is ready for use.
cluster.wait_until_ready(timedelta(seconds=5))
embeddings = OpenAIEmbeddings()
vectorstore = CouchbaseVectorStore(
cluster=cluster,
bucket_name="",
scope_name="",
collection_name="",
embedding=embeddings,
index_name="vector-search-index",
)
```
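Once the vector store is connected, documents can be added and queried with the standard LangChain vector store methods. A minimal sketch (assumes the search index named above already exists in Couchbase):
```python
vectorstore.add_texts(
    [
        "Couchbase is a distributed NoSQL database.",
        "LangChain is a framework for building LLM applications.",
    ]
)
results = vectorstore.similarity_search("What is Couchbase?", k=1)
print(results[0].page_content)
```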
| |
157103
|
"""Unit tests for chat models."""
import os
from abc import abstractmethod
from typing import Any, Dict, List, Literal, Optional, Tuple, Type
from unittest import mock
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.load import dumpd, load
from langchain_core.runnables import RunnableBinding
from langchain_core.tools import tool
from pydantic import BaseModel, Field, SecretStr
from pydantic.v1 import (
BaseModel as BaseModelV1,
)
from pydantic.v1 import (
Field as FieldV1,
)
from pydantic.v1 import (
ValidationError as ValidationErrorV1,
)
from syrupy import SnapshotAssertion
from langchain_standard_tests.base import BaseStandardTests
from langchain_standard_tests.utils.pydantic import PYDANTIC_MAJOR_VERSION
class Person(BaseModel): # Used by some dependent tests. Should be deprecated.
"""Record attributes of a person."""
name: str = Field(..., description="The name of the person.")
age: int = Field(..., description="The age of the person.")
def generate_schema_pydantic_v1_from_2() -> Any:
"""Use to generate a schema from v1 namespace in pydantic 2."""
if PYDANTIC_MAJOR_VERSION != 2:
raise AssertionError("This function is only compatible with Pydantic v2.")
class PersonB(BaseModelV1):
"""Record attributes of a person."""
name: str = FieldV1(..., description="The name of the person.")
age: int = FieldV1(..., description="The age of the person.")
return PersonB
def generate_schema_pydantic() -> Any:
"""Works with either pydantic 1 or 2"""
class PersonA(BaseModel):
"""Record attributes of a person."""
name: str = Field(..., description="The name of the person.")
age: int = Field(..., description="The age of the person.")
return PersonA
TEST_PYDANTIC_MODELS = [generate_schema_pydantic()]
if PYDANTIC_MAJOR_VERSION == 2:
TEST_PYDANTIC_MODELS.append(generate_schema_pydantic_v1_from_2())
@tool
def my_adder_tool(a: int, b: int) -> int:
"""Takes two integers, a and b, and returns their sum."""
return a + b
def my_adder(a: int, b: int) -> int:
"""Takes two integers, a and b, and returns their sum."""
return a + b
class ChatModelTests(BaseStandardTests):
@property
@abstractmethod
def chat_model_class(self) -> Type[BaseChatModel]:
...
@property
def chat_model_params(self) -> dict:
return {}
@property
def standard_chat_model_params(self) -> dict:
return {
"temperature": 0,
"max_tokens": 100,
"timeout": 60,
"stop": [],
"max_retries": 2,
}
@pytest.fixture
def model(self) -> BaseChatModel:
return self.chat_model_class(
**{**self.standard_chat_model_params, **self.chat_model_params}
)
@property
def has_tool_calling(self) -> bool:
return self.chat_model_class.bind_tools is not BaseChatModel.bind_tools
@property
def tool_choice_value(self) -> Optional[str]:
"""Value to use for tool choice when used in tests."""
return None
@property
def has_structured_output(self) -> bool:
return (
self.chat_model_class.with_structured_output
is not BaseChatModel.with_structured_output
)
@property
def supports_image_inputs(self) -> bool:
return False
@property
def supports_video_inputs(self) -> bool:
return False
@property
def returns_usage_metadata(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return False
@property
def supports_image_tool_message(self) -> bool:
return False
@property
def supported_usage_metadata_details(
self,
) -> Dict[
Literal["invoke", "stream"],
List[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {"invoke": [], "stream": []}
class ChatModelUnitTests(ChatModelTests):
@property
def standard_chat_model_params(self) -> dict:
params = super().standard_chat_model_params
params["api_key"] = "test"
return params
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
"""Return env vars, init args, and expected instance attrs for initializing
from env vars."""
return {}, {}, {}
def test_init(self) -> None:
model = self.chat_model_class(
**{**self.standard_chat_model_params, **self.chat_model_params}
)
assert model is not None
def test_init_from_env(self) -> None:
env_params, model_params, expected_attrs = self.init_from_env_params
if env_params:
with mock.patch.dict(os.environ, env_params):
model = self.chat_model_class(**model_params)
assert model is not None
for k, expected in expected_attrs.items():
actual = getattr(model, k)
if isinstance(actual, SecretStr):
actual = actual.get_secret_value()
assert actual == expected
def test_init_streaming(
self,
) -> None:
model = self.chat_model_class(
**{
**self.standard_chat_model_params,
**self.chat_model_params,
"streaming": True,
}
)
assert model is not None
def test_bind_tool_pydantic(
self,
model: BaseChatModel,
) -> None:
if not self.has_tool_calling:
return
tools = [my_adder_tool, my_adder]
for pydantic_model in TEST_PYDANTIC_MODELS:
model_schema = (
pydantic_model.model_json_schema()
if hasattr(pydantic_model, "model_json_schema")
else pydantic_model.schema()
)
tools.extend([pydantic_model, model_schema])
# Doing a mypy ignore here since some of the tools are from pydantic
# BaseModel 2 which isn't typed properly yet. This will need to be fixed
# so type checking does not become annoying to users.
tool_model = model.bind_tools(tools, tool_choice="any") # type: ignore
assert isinstance(tool_model, RunnableBinding)
@pytest.mark.parametrize("schema", TEST_PYDANTIC_MODELS)
def test_with_structured_output(
self,
model: BaseChatModel,
schema: Any,
) -> None:
if not self.has_structured_output:
return
assert model.with_structured_output(schema) is not None
def test_standard_params(self, model: BaseChatModel) -> None:
class ExpectedParams(BaseModelV1):
ls_provider: str
ls_model_name: str
ls_model_type: Literal["chat"]
ls_temperature: Optional[float]
ls_max_tokens: Optional[int]
ls_stop: Optional[List[str]]
ls_params = model._get_ls_params()
try:
ExpectedParams(**ls_params)
except ValidationErrorV1 as e:
pytest.fail(f"Validation error: {e}")
# Test optional params
model = self.chat_model_class(
max_tokens=10, stop=["test"], **self.chat_model_params
)
ls_params = model._get_ls_params()
try:
ExpectedParams(**ls_params)
except ValidationErrorV1 as e:
pytest.fail(f"Validation error: {e}")
def test_serdes(self, model: BaseChatModel, snapshot: SnapshotAssertion) -> None:
if not self.chat_model_class.is_lc_serializable():
return
env_params, model_params, expected_attrs = self.init_from_env_params
with mock.patch.dict(os.environ, env_params):
ser = dumpd(model)
assert ser == snapshot(name="serialized")
assert model.dict() == load(dumpd(model)).dict()
| |
157233
|
from langchain_community.chat_models import ChatDeepInfra
def test_deepinfra_model_name_param() -> None:
llm = ChatDeepInfra(model_name="foo") # type: ignore[call-arg]
assert llm.model_name == "foo"
def test_deepinfra_model_param() -> None:
llm = ChatDeepInfra(model="foo")
assert llm.model_name == "foo"
| |
157244
|
from langchain_community.chat_models import __all__, _module_lookup
EXPECTED_ALL = [
"AzureChatOpenAI",
"BedrockChat",
"ChatAnthropic",
"ChatAnyscale",
"ChatBaichuan",
"ChatCohere",
"ChatCoze",
"ChatDatabricks",
"ChatDeepInfra",
"ChatEverlyAI",
"ChatEdenAI",
"ChatFireworks",
"ChatFriendli",
"ChatGooglePalm",
"ChatHuggingFace",
"ChatHunyuan",
"ChatJavelinAIGateway",
"ChatKinetica",
"ChatKonko",
"ChatLiteLLM",
"ChatLiteLLMRouter",
"ChatLlamaCpp",
"ChatMLflowAIGateway",
"ChatMaritalk",
"ChatMlflow",
"ChatMLflowAIGateway",
"ChatMLX",
"ChatNebula",
"ChatOCIGenAI",
"ChatOCIModelDeployment",
"ChatOCIModelDeploymentVLLM",
"ChatOCIModelDeploymentTGI",
"ChatOllama",
"ChatOpenAI",
"ChatPerplexity",
"ChatPremAI",
"ChatSambaNovaCloud",
"ChatSambaStudio",
"ChatSparkLLM",
"ChatTongyi",
"ChatVertexAI",
"ChatYandexGPT",
"ChatYuan2",
"ChatZhipuAI",
"ErnieBotChat",
"FakeListChatModel",
"GPTRouter",
"GigaChat",
"HumanInputChatModel",
"JinaChat",
"LlamaEdgeChatService",
"MiniMaxChat",
"MoonshotChat",
"PaiEasChatEndpoint",
"PromptLayerChatOpenAI",
"SolarChat",
"QianfanChatEndpoint",
"VolcEngineMaasChat",
"ChatOctoAI",
"ChatSnowflakeCortex",
"ChatYi",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
assert set(__all__) == set(_module_lookup.keys())
| |
157248
|
import json
import os
from unittest import mock
import pytest
from langchain_community.chat_models.azure_openai import AzureChatOpenAI
@mock.patch.dict(
os.environ,
{
"OPENAI_API_KEY": "test",
"OPENAI_API_BASE": "https://oai.azure.com/",
"OPENAI_API_VERSION": "2023-05-01",
},
)
@pytest.mark.requires("openai")
@pytest.mark.parametrize(
"model_name", ["gpt-4", "gpt-4-32k", "gpt-35-turbo", "gpt-35-turbo-16k"]
)
def test_model_name_set_on_chat_result_when_present_in_response(
model_name: str,
) -> None:
sample_response_text = f"""
{{
"id": "chatcmpl-7ryweq7yc8463fas879t9hdkkdf",
"object": "chat.completion",
"created": 1690381189,
"model": "{model_name}",
"choices": [
{{
"index": 0,
"finish_reason": "stop",
"message": {{
"role": "assistant",
"content": "I'm an AI assistant that can help you."
}}
}}
],
"usage": {{
"completion_tokens": 28,
"prompt_tokens": 15,
"total_tokens": 43
}}
}}
"""
# convert sample_response_text to instance of Mapping[str, Any]
sample_response = json.loads(sample_response_text)
mock_chat = AzureChatOpenAI()
chat_result = mock_chat._create_chat_result(sample_response)
assert (
chat_result.llm_output is not None
and chat_result.llm_output["model_name"] == model_name
)
| |
157269
|
from langchain_community.agent_toolkits import SQLDatabaseToolkit, create_sql_agent
from langchain_community.utilities.sql_database import SQLDatabase
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_create_sql_agent() -> None:
db = SQLDatabase.from_uri("sqlite:///:memory:")
queries = {"foo": "Final Answer: baz"}
llm = FakeLLM(queries=queries, sequential_responses=True)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent_executor = create_sql_agent(
llm=llm,
toolkit=toolkit,
)
assert agent_executor.run("hello") == "baz"
| |
157308
|
"""Test FAISS functionality."""
import datetime
import math
import tempfile
from typing import Union
import pytest
from langchain_core.documents import Document
from langchain_community.docstore.base import Docstore
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.vectorstores.faiss import FAISS
from langchain_community.vectorstores.utils import DistanceStrategy
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
_PAGE_CONTENT = """This is a page about LangChain.
It is a really cool framework.
What isn't there to love about langchain?
Made in 2022."""
class FakeDocstore(Docstore):
"""Fake docstore for testing purposes."""
def search(self, search: str) -> Union[str, Document]:
"""Return the fake document."""
document = Document(page_content=_PAGE_CONTENT)
return document
@pytest.mark.requires("faiss")
def test_faiss() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
# Retriever standard params
retriever = docsearch.as_retriever()
ls_params = retriever._get_ls_params()
assert ls_params == {
"ls_retriever_name": "vectorstore",
"ls_vector_store_provider": "FAISS",
"ls_embedding_provider": "FakeEmbeddings",
}
@pytest.mark.requires("faiss")
async def test_faiss_afrom_texts() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = await docsearch.asimilarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.requires("faiss")
def test_faiss_vector_sim() -> None:
"""Test vector similarity."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text="foo")
output = docsearch.similarity_search_by_vector(query_vec, k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.requires("faiss")
async def test_faiss_async_vector_sim() -> None:
"""Test vector similarity."""
texts = ["foo", "bar", "baz"]
docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = await FakeEmbeddings().aembed_query(text="foo")
output = await docsearch.asimilarity_search_by_vector(query_vec, k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.requires("faiss")
def test_faiss_vector_sim_with_score_threshold() -> None:
"""Test vector similarity."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text="foo")
output = docsearch.similarity_search_by_vector(query_vec, k=2, score_threshold=0.2)
assert output == [Document(page_content="foo")]
@pytest.mark.requires("faiss")
async def test_faiss_vector_async_sim_with_score_threshold() -> None:
"""Test vector similarity."""
texts = ["foo", "bar", "baz"]
docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = await FakeEmbeddings().aembed_query(text="foo")
output = await docsearch.asimilarity_search_by_vector(
query_vec, k=2, score_threshold=0.2
)
assert output == [Document(page_content="foo")]
@pytest.mark.requires("faiss")
def test_similarity_search_with_score_by_vector() -> None:
"""Test vector similarity with score by vector."""
texts = ["foo", "bar", "baz"]
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text="foo")
output = docsearch.similarity_search_with_score_by_vector(query_vec, k=1)
assert len(output) == 1
assert output[0][0] == Document(page_content="foo")
@pytest.mark.requires("faiss")
async def test_similarity_async_search_with_score_by_vector() -> None:
"""Test vector similarity with score by vector."""
texts = ["foo", "bar", "baz"]
docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore(
{
index_to_id[0]: Document(page_content="foo"),
index_to_id[1]: Document(page_content="bar"),
index_to_id[2]: Document(page_content="baz"),
}
)
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = await FakeEmbeddings().aembed_query(text="foo")
output = await docsearch.asimilarity_search_with_score_by_vector(query_vec, k=1)
assert len(output) == 1
assert output[0][0] == Document(page_content="foo")
| |
157398
|
@pytest.mark.parametrize(
"patch_func,patch_func_value,kwargs",
(
# JSON content.
(
"pathlib.Path.read_text",
'[{"text": "value1"}, {"text": "value2"}]',
{"jq_schema": ".[]", "content_key": "text"},
),
# JSON Lines content.
(
"pathlib.Path.open",
io.StringIO(
"""
{"text": "value1"}
{"text": "value2"}
"""
),
{"jq_schema": ".", "content_key": "text", "json_lines": True},
),
),
)
def test_json_meta_02(
patch_func: str, patch_func_value: Any, kwargs: Dict, mocker: MockerFixture
) -> None:
mocker.patch("builtins.open", mocker.mock_open())
mocker.patch(patch_func, return_value=patch_func_value)
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="value1",
metadata={"source": file_path, "seq_num": 1, "x": "value1-meta"},
),
Document(
page_content="value2",
metadata={"source": file_path, "seq_num": 2, "x": "value2-meta"},
),
]
def metadata_func(record: Dict, metadata: Dict) -> Dict:
return {**metadata, "x": f"{record['text']}-meta"}
loader = JSONLoader(file_path=file_path, metadata_func=metadata_func, **kwargs)
result = loader.load()
assert result == expected_docs
@pytest.mark.parametrize(
"params",
(
{"jq_schema": ".[].text"},
{"jq_schema": ".[]", "content_key": "text"},
{
"jq_schema": ".[]",
"content_key": ".text",
"is_content_key_jq_parsable": True,
},
),
)
def test_load_json_with_jq_parsable_content_key(
params: Dict, mocker: MockerFixture
) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="value1",
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content="value2",
metadata={"source": file_path, "seq_num": 2},
),
]
mocker.patch(
"pathlib.Path.open",
return_value=io.StringIO(
"""
[{"text": "value1"}, {"text": "value2"}]
"""
),
)
loader = JSONLoader(file_path=file_path, json_lines=True, **params)
result = loader.load()
assert result == expected_docs
def test_load_json_with_nested_jq_parsable_content_key(mocker: MockerFixture) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="message1",
metadata={"source": file_path, "seq_num": 1},
),
Document(
page_content="message2",
metadata={"source": file_path, "seq_num": 2},
),
]
mocker.patch(
"pathlib.Path.open",
return_value=io.StringIO(
"""
{"data": [
{"attributes": {"message": "message1","tags": ["tag1"]},"id": "1"},
{"attributes": {"message": "message2","tags": ["tag2"]},"id": "2"}]}
"""
),
)
loader = JSONLoader(
file_path=file_path,
jq_schema=".data[]",
content_key=".attributes.message",
is_content_key_jq_parsable=True,
)
result = loader.load()
assert result == expected_docs
def test_load_json_with_nested_jq_parsable_content_key_with_metadata(
mocker: MockerFixture,
) -> None:
file_path = "/workspaces/langchain/test.json"
expected_docs = [
Document(
page_content="message1",
metadata={"source": file_path, "seq_num": 1, "id": "1", "tags": ["tag1"]},
),
Document(
page_content="message2",
metadata={"source": file_path, "seq_num": 2, "id": "2", "tags": ["tag2"]},
),
]
mocker.patch(
"pathlib.Path.open",
return_value=io.StringIO(
"""
{"data": [
{"attributes": {"message": "message1","tags": ["tag1"]},"id": "1"},
{"attributes": {"message": "message2","tags": ["tag2"]},"id": "2"}]}
"""
),
)
def _metadata_func(record: dict, metadata: dict) -> dict:
metadata["id"] = record.get("id")
metadata["tags"] = record["attributes"].get("tags")
return metadata
loader = JSONLoader(
file_path=file_path,
jq_schema=".data[]",
content_key=".attributes.message",
is_content_key_jq_parsable=True,
metadata_func=_metadata_func,
)
result = loader.load()
assert result == expected_docs
| |
157402
|
from pathlib import Path
from langchain_core.documents import Document
from langchain_community.document_loaders.csv_loader import CSVLoader
class TestCSVLoader:
# Tests that a CSV file with valid data is loaded successfully.
def test_csv_loader_load_valid_data(self) -> None:
# Setup
file_path = self._get_csv_file_path("test_nominal.csv")
expected_docs = [
Document(
page_content="column1: value1\ncolumn2: value2\ncolumn3: value3",
metadata={"source": file_path, "row": 0},
),
Document(
page_content="column1: value4\ncolumn2: value5\ncolumn3: value6",
metadata={"source": file_path, "row": 1},
),
]
# Exercise
loader = CSVLoader(file_path=file_path)
result = loader.load()
# Assert
assert result == expected_docs
# Tests that an empty CSV file is handled correctly.
def test_csv_loader_load_empty_file(self) -> None:
# Setup
file_path = self._get_csv_file_path("test_empty.csv")
expected_docs: list = []
# Exercise
loader = CSVLoader(file_path=file_path)
result = loader.load()
# Assert
assert result == expected_docs
# Tests that a CSV file with only one row is handled correctly.
def test_csv_loader_load_single_row_file(self) -> None:
# Setup
file_path = self._get_csv_file_path("test_one_row.csv")
expected_docs = [
Document(
page_content="column1: value1\ncolumn2: value2\ncolumn3: value3",
metadata={"source": file_path, "row": 0},
)
]
# Exercise
loader = CSVLoader(file_path=file_path)
result = loader.load()
# Assert
assert result == expected_docs
# Tests that a CSV file with only one column is handled correctly.
def test_csv_loader_load_single_column_file(self) -> None:
# Setup
file_path = self._get_csv_file_path("test_one_col.csv")
expected_docs = [
Document(
page_content="column1: value1",
metadata={"source": file_path, "row": 0},
),
Document(
page_content="column1: value2",
metadata={"source": file_path, "row": 1},
),
Document(
page_content="column1: value3",
metadata={"source": file_path, "row": 2},
),
]
# Exercise
loader = CSVLoader(file_path=file_path)
result = loader.load()
# Assert
assert result == expected_docs
def test_csv_loader_load_none_column_file(self) -> None:
# Setup
file_path = self._get_csv_file_path("test_none_col.csv")
expected_docs = [
Document(
page_content="column1: value1\ncolumn2: value2\n"
"column3: value3\nNone: value4,value5",
metadata={"source": file_path, "row": 0},
),
Document(
page_content="column1: value6\ncolumn2: value7\n"
"column3: value8\nNone: value9",
metadata={"source": file_path, "row": 1},
),
]
# Exercise
loader = CSVLoader(file_path=file_path)
result = loader.load()
# Assert
assert result == expected_docs
def test_csv_loader_content_columns(self) -> None:
# Setup
file_path = self._get_csv_file_path("test_none_col.csv")
expected_docs = [
Document(
page_content="column1: value1\n" "column3: value3",
metadata={"source": file_path, "row": 0},
),
Document(
page_content="column1: value6\n" "column3: value8",
metadata={"source": file_path, "row": 1},
),
]
# Exercise
loader = CSVLoader(file_path=file_path, content_columns=("column1", "column3"))
result = loader.load()
# Assert
assert result == expected_docs
# utility functions
def _get_csv_file_path(self, file_name: str) -> str:
return str(Path(__file__).resolve().parent / "test_docs" / "csv" / file_name)
| |
157409
|
@pytest.mark.requires("atlassian", "bs4", "lxml")
class TestConfluenceLoader:
CONFLUENCE_URL: str = "https://example.atlassian.com/wiki"
MOCK_USERNAME: str = "user@gmail.com"
MOCK_API_TOKEN: str = "api_token"
MOCK_SPACE_KEY: str = "spaceId123"
def test_confluence_loader_initialization(self, mock_confluence: MagicMock) -> None:
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
mock_confluence.assert_called_once_with(
url=self.CONFLUENCE_URL,
username="user@gmail.com",
password="api_token",
cloud=True,
)
def test_confluence_loader_initialization_invalid(self) -> None:
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
token="foo",
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
oauth2={
"access_token": "bar",
"access_token_secret": "bar",
"consumer_key": "bar",
"key_cert": "bar",
},
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
session=requests.Session(),
)
def test_confluence_loader_initialization_from_env(
self, mock_confluence: MagicMock
) -> None:
with unittest.mock.patch.dict(
"os.environ",
{
"CONFLUENCE_USERNAME": self.MOCK_USERNAME,
"CONFLUENCE_API_TOKEN": self.MOCK_API_TOKEN,
},
):
ConfluenceLoader(url=self.CONFLUENCE_URL)
mock_confluence.assert_called_with(
url=self.CONFLUENCE_URL, username=None, password=None, cloud=True
)
def test_confluence_loader_load_data_invalid_args(self) -> None:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
with pytest.raises(
ValueError,
match="Must specify at least one among `space_key`, `page_ids`, `label`, `cql` parameters.", # noqa: E501
):
confluence_loader.load()
def test_confluence_loader_load_data_by_page_ids(
self, mock_confluence: MagicMock
) -> None:
mock_confluence.get_page_by_id.side_effect = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
mock_page_ids = ["123", "456"]
confluence_loader = self._get_mock_confluence_loader(
mock_confluence, page_ids=mock_page_ids
)
documents = list(confluence_loader.lazy_load())
assert mock_confluence.get_page_by_id.call_count == 2
assert mock_confluence.get_all_restrictions_for_content.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_all_pages_from_space.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_load_data_by_space_id(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(
mock_confluence, space_key=self.MOCK_SPACE_KEY, max_pages=2
)
documents = confluence_loader.load()
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
@pytest.mark.requires("markdownify")
def test_confluence_loader_when_content_format_and_keep_markdown_format_enabled(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123", ContentFormat.VIEW),
self._get_mock_page("456", ContentFormat.VIEW),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(
mock_confluence,
space_key=self.MOCK_SPACE_KEY,
content_format=ContentFormat.VIEW,
keep_markdown_format=True,
max_pages=2,
)
documents = confluence_loader.load()
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123\n\n"
assert documents[1].page_content == "Content 456\n\n"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def _get_mock_confluence_loader(
self, mock_confluence: MagicMock, **kwargs: Any
) -> ConfluenceLoader:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
**kwargs,
)
confluence_loader.confluence = mock_confluence
return confluence_loader
def _get_mock_page(
self, page_id: str, content_format: ContentFormat = ContentFormat.STORAGE
) -> Dict:
return {
"id": f"{page_id}",
"title": f"Page {page_id}",
"body": {
f"{content_format.name.lower()}": {"value": f"<p>Content {page_id}</p>"}
},
"status": "current",
"type": "page",
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}",
"tinyui": "/x/tiny_ui_link",
"editui": f"/pages/resumedraft.action?draftId={page_id}",
"webui": f"/spaces/{self.MOCK_SPACE_KEY}/overview",
},
}
def _get_mock_page_restrictions(self, page_id: str) -> Dict:
return {
"read": {
"operation": "read",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/read" # noqa: E501
},
},
"update": {
"operation": "update",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/update" # noqa: E501
},
},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation", # noqa: E501
"base": self.CONFLUENCE_URL,
"context": "/wiki",
},
}
| |
157561
|
from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
def test_contextual_compression_retriever_get_relevant_docs() -> None:
"""Test get_relevant_docs."""
texts = [
"This is a document about the Boston Celtics",
"The Boston Celtics won the game by 20 points",
"I simply love going to the movies",
]
embeddings = OpenAIEmbeddings()
base_compressor = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.75)
base_retriever = FAISS.from_texts(texts, embedding=embeddings).as_retriever(
search_kwargs={"k": len(texts)}
)
retriever = ContextualCompressionRetriever(
base_compressor=base_compressor, base_retriever=base_retriever
)
actual = retriever.invoke("Tell me about the Celtics")
assert len(actual) == 2
assert texts[-1] not in [d.page_content for d in actual]
async def test_acontextual_compression_retriever_get_relevant_docs() -> None:
"""Test get_relevant_docs."""
texts = [
"This is a document about the Boston Celtics",
"The Boston Celtics won the game by 20 points",
"I simply love going to the movies",
]
embeddings = OpenAIEmbeddings()
base_compressor = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.75)
base_retriever = FAISS.from_texts(texts, embedding=embeddings).as_retriever(
search_kwargs={"k": len(texts)}
)
retriever = ContextualCompressionRetriever(
base_compressor=base_compressor, base_retriever=base_retriever
)
actual = retriever.invoke("Tell me about the Celtics")
assert len(actual) == 2
assert texts[-1] not in [d.page_content for d in actual]
| |
157586
|
"""Test openai embeddings."""
import numpy as np
import pytest
from langchain_community.embeddings.openai import OpenAIEmbeddings
@pytest.mark.scheduled
def test_openai_embedding_documents() -> None:
"""Test openai embeddings."""
documents = ["foo bar"]
embedding = OpenAIEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 1536
@pytest.mark.scheduled
def test_openai_embedding_documents_multiple() -> None:
"""Test openai embeddings."""
documents = ["foo bar", "bar foo", "foo"]
embedding = OpenAIEmbeddings(chunk_size=2)
embedding.embedding_ctx_length = 8191
output = embedding.embed_documents(documents)
assert len(output) == 3
assert len(output[0]) == 1536
assert len(output[1]) == 1536
assert len(output[2]) == 1536
@pytest.mark.scheduled
async def test_openai_embedding_documents_async_multiple() -> None:
"""Test openai embeddings."""
documents = ["foo bar", "bar foo", "foo"]
embedding = OpenAIEmbeddings(chunk_size=2)
embedding.embedding_ctx_length = 8191
output = await embedding.aembed_documents(documents)
assert len(output) == 3
assert len(output[0]) == 1536
assert len(output[1]) == 1536
assert len(output[2]) == 1536
@pytest.mark.scheduled
def test_openai_embedding_query() -> None:
"""Test openai embeddings."""
document = "foo bar"
embedding = OpenAIEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 1536
@pytest.mark.scheduled
async def test_openai_embedding_async_query() -> None:
"""Test openai embeddings."""
document = "foo bar"
embedding = OpenAIEmbeddings()
output = await embedding.aembed_query(document)
assert len(output) == 1536
@pytest.mark.skip(reason="Unblock scheduled testing. TODO: fix.")
@pytest.mark.scheduled
def test_openai_embedding_with_empty_string() -> None:
"""Test openai embeddings with empty string."""
import openai
document = ["", "abc"]
embedding = OpenAIEmbeddings()
output = embedding.embed_documents(document)
assert len(output) == 2
assert len(output[0]) == 1536
expected_output = openai.Embedding.create(input="", model="text-embedding-ada-002")[
"data"
][0]["embedding"]
assert np.allclose(output[0], expected_output)
assert len(output[1]) == 1536
@pytest.mark.scheduled
def test_embed_documents_normalized() -> None:
output = OpenAIEmbeddings().embed_documents(["foo walked to the market"])
assert np.isclose(np.linalg.norm(output[0]), 1.0)
@pytest.mark.scheduled
def test_embed_query_normalized() -> None:
output = OpenAIEmbeddings().embed_query("foo walked to the market")
assert np.isclose(np.linalg.norm(output), 1.0)
|