| id | text | title |
|---|---|---|
176250
|
import nox
@nox.session(reuse_venv=True, name="test-pydantic-v1")
def test_pydantic_v1(session: nox.Session) -> None:
session.install("-r", "requirements-dev.lock")
session.install("pydantic<2")
session.run("pytest", "--showlocals", "--ignore=tests/functional", *session.posargs)
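# Usage note (an assumption, not part of the session definition above): with nox
# installed, the session would typically be invoked by its registered name, and
# anything after `--` is forwarded to pytest through `session.posargs`, e.g.
#
#   nox -s test-pydantic-v1 -- -k parse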
| |
176299
|
@pytest.mark.respx(base_url=base_url)
@pytest.mark.skipif(not PYDANTIC_V2, reason="dataclasses only supported in v2")
def test_parse_pydantic_dataclass(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
from pydantic.dataclasses import dataclass
@dataclass
class CalendarEvent:
name: str
date: str
participants: List[str]
completion = _make_snapshot_request(
lambda c: c.beta.chat.completions.parse(
model="gpt-4o-2024-08-06",
messages=[
{"role": "system", "content": "Extract the event information."},
{"role": "user", "content": "Alice and Bob are going to a science fair on Friday."},
],
response_format=CalendarEvent,
),
content_snapshot=snapshot(
'{"id": "chatcmpl-ABfvqhz4uUUWsw8Ohw2Mp9B4sKKV8", "object": "chat.completion", "created": 1727346158, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"name\\":\\"Science Fair\\",\\"date\\":\\"Friday\\",\\"participants\\":[\\"Alice\\",\\"Bob\\"]}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 92, "completion_tokens": 17, "total_tokens": 109, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}'
),
mock_client=client,
respx_mock=respx_mock,
)
assert print_obj(completion, monkeypatch) == snapshot(
"""\
ParsedChatCompletion[CalendarEvent](
choices=[
ParsedChoice[CalendarEvent](
finish_reason='stop',
index=0,
logprobs=None,
message=ParsedChatCompletionMessage[CalendarEvent](
audio=None,
content='{"name":"Science Fair","date":"Friday","participants":["Alice","Bob"]}',
function_call=None,
parsed=CalendarEvent(name='Science Fair', date='Friday', participants=['Alice', 'Bob']),
refusal=None,
role='assistant',
tool_calls=[]
)
)
],
created=1727346158,
id='chatcmpl-ABfvqhz4uUUWsw8Ohw2Mp9B4sKKV8',
model='gpt-4o-2024-08-06',
object='chat.completion',
service_tier=None,
system_fingerprint='fp_7568d46099',
usage=CompletionUsage(
completion_tokens=17,
completion_tokens_details=CompletionTokensDetails(audio_tokens=None, reasoning_tokens=0),
prompt_tokens=92,
prompt_tokens_details=None,
total_tokens=109
)
)
"""
)
@pytest.mark.respx(base_url=base_url)
def test_pydantic_tool_model_all_types(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
completion = _make_snapshot_request(
lambda c: c.beta.chat.completions.parse(
model="gpt-4o-2024-08-06",
messages=[
{
"role": "user",
"content": "look up all my orders in may of last year that were fulfilled but not delivered on time",
},
],
tools=[openai.pydantic_function_tool(Query)],
response_format=Query,
),
content_snapshot=snapshot(
'{"id": "chatcmpl-ABfvtNiaTNUF6OymZUnEFc9lPq9p1", "object": "chat.completion", "created": 1727346161, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_NKpApJybW1MzOjZO2FzwYw0d", "type": "function", "function": {"name": "Query", "arguments": "{\\"name\\":\\"May 2022 Fulfilled Orders Not Delivered on Time\\",\\"table_name\\":\\"orders\\",\\"columns\\":[\\"id\\",\\"status\\",\\"expected_delivery_date\\",\\"delivered_at\\",\\"shipped_at\\",\\"ordered_at\\",\\"canceled_at\\"],\\"conditions\\":[{\\"column\\":\\"ordered_at\\",\\"operator\\":\\">=\\",\\"value\\":\\"2022-05-01\\"},{\\"column\\":\\"ordered_at\\",\\"operator\\":\\"<=\\",\\"value\\":\\"2022-05-31\\"},{\\"column\\":\\"status\\",\\"operator\\":\\"=\\",\\"value\\":\\"fulfilled\\"},{\\"column\\":\\"delivered_at\\",\\"operator\\":\\">\\",\\"value\\":{\\"column_name\\":\\"expected_delivery_date\\"}}],\\"order_by\\":\\"asc\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 512, "completion_tokens": 132, "total_tokens": 644, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}'
),
mock_client=client,
respx_mock=respx_mock,
)
assert print_obj(completion.choices[0], monkeypatch) == snapshot(
"""\
ParsedChoice[Query](
finish_reason='tool_calls',
index=0,
logprobs=None,
message=ParsedChatCompletionMessage[Query](
audio=None,
content=None,
function_call=None,
parsed=None,
refusal=None,
role='assistant',
tool_calls=[
ParsedFunctionToolCall(
function=ParsedFunction(
arguments='{"name":"May 2022 Fulfilled Orders Not Delivered on
Time","table_name":"orders","columns":["id","status","expected_delivery_date","delivered_at","shipped_at","ordered_at","
canceled_at"],"conditions":[{"column":"ordered_at","operator":">=","value":"2022-05-01"},{"column":"ordered_at","operato
r":"<=","value":"2022-05-31"},{"column":"status","operator":"=","value":"fulfilled"},{"column":"delivered_at","operator"
:">","value":{"column_name":"expected_delivery_date"}}],"order_by":"asc"}',
name='Query',
parsed_arguments=Query(
columns=[
<Column.id: 'id'>,
<Column.status: 'status'>,
<Column.expected_delivery_date: 'expected_delivery_date'>,
<Column.delivered_at: 'delivered_at'>,
<Column.shipped_at: 'shipped_at'>,
<Column.ordered_at: 'ordered_at'>,
<Column.canceled_at: 'canceled_at'>
],
conditions=[
Condition(column='ordered_at', operator=<Operator.ge: '>='>, value='2022-05-01'),
Condition(column='ordered_at', operator=<Operator.le: '<='>, value='2022-05-31'),
Condition(column='status', operator=<Operator.eq: '='>, value='fulfilled'),
Condition(
column='delivered_at',
operator=<Operator.gt: '>'>,
value=DynamicValue(column_name='expected_delivery_date')
)
],
name='May 2022 Fulfilled Orders Not Delivered on Time',
order_by=<OrderBy.asc: 'asc'>,
table_name=<Table.orders: 'orders'>
)
),
id='call_NKpApJybW1MzOjZO2FzwYw0d',
type='function'
)
]
)
)
"""
)
| |
176375
|
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from openai import AzureOpenAI
token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
# may change in the future
# https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
api_version = "2023-07-01-preview"
# https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource
endpoint = "https://my-resource.openai.azure.com"
client = AzureOpenAI(
api_version=api_version,
azure_endpoint=endpoint,
azure_ad_token_provider=token_provider,
)
completion = client.chat.completions.create(
model="deployment-name", # e.g. gpt-35-instant
messages=[
{
"role": "user",
"content": "How do I output all files in a directory using Python?",
},
],
)
print(completion.to_json())
| |
176376
|
from openai import AzureOpenAI
# may change in the future
# https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
api_version = "2023-07-01-preview"
# gets the API Key from environment variable AZURE_OPENAI_API_KEY
client = AzureOpenAI(
api_version=api_version,
# https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource
azure_endpoint="https://example-endpoint.openai.azure.com",
)
completion = client.chat.completions.create(
model="deployment-name", # e.g. gpt-35-instant
messages=[
{
"role": "user",
"content": "How do I output all files in a directory using Python?",
},
],
)
print(completion.to_json())
deployment_client = AzureOpenAI(
api_version=api_version,
# https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource
azure_endpoint="https://example-resource.azure.openai.com/",
# Navigate to the Azure OpenAI Studio to deploy a model.
azure_deployment="deployment-name", # e.g. gpt-35-instant
)
completion = deployment_client.chat.completions.create(
model="<ignored>",
messages=[
{
"role": "user",
"content": "How do I output all files in a directory using Python?",
},
],
)
print(completion.to_json())
| |
176408
|
class BaseAPIResponse(Generic[R]):
_cast_to: type[R]
_client: BaseClient[Any, Any]
_parsed_by_type: dict[type[Any], Any]
_is_sse_stream: bool
_stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None
_options: FinalRequestOptions
http_response: httpx.Response
retries_taken: int
"""The number of retries made. If no retries happened this will be `0`"""
def __init__(
self,
*,
raw: httpx.Response,
cast_to: type[R],
client: BaseClient[Any, Any],
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
options: FinalRequestOptions,
retries_taken: int = 0,
) -> None:
self._cast_to = cast_to
self._client = client
self._parsed_by_type = {}
self._is_sse_stream = stream
self._stream_cls = stream_cls
self._options = options
self.http_response = raw
self.retries_taken = retries_taken
@property
def headers(self) -> httpx.Headers:
return self.http_response.headers
@property
def http_request(self) -> httpx.Request:
"""Returns the httpx Request instance associated with the current response."""
return self.http_response.request
@property
def status_code(self) -> int:
return self.http_response.status_code
@property
def url(self) -> httpx.URL:
"""Returns the URL for which the request was made."""
return self.http_response.url
@property
def method(self) -> str:
return self.http_request.method
@property
def http_version(self) -> str:
return self.http_response.http_version
@property
def elapsed(self) -> datetime.timedelta:
"""The time taken for the complete request/response cycle to complete."""
return self.http_response.elapsed
@property
def is_closed(self) -> bool:
"""Whether or not the response body has been closed.
If this is False then there is response data that has not been read yet.
You must either fully consume the response body or call `.close()`
before discarding the response to prevent resource leaks.
"""
return self.http_response.is_closed
@override
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__} [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_to}>"
)
def _parse(self, *, to: type[_T] | None = None) -> R | _T:
# unwrap `Annotated[T, ...]` -> `T`
if to and is_annotated_type(to):
to = extract_type_arg(to, 0)
if self._is_sse_stream:
if to:
if not is_stream_class_type(to):
raise TypeError(f"Expected custom parse type to be a subclass of {Stream} or {AsyncStream}")
return cast(
_T,
to(
cast_to=extract_stream_chunk_type(
to,
failure_message="Expected custom stream type to be passed with a type argument, e.g. Stream[ChunkType]",
),
response=self.http_response,
client=cast(Any, self._client),
),
)
if self._stream_cls:
return cast(
R,
self._stream_cls(
cast_to=extract_stream_chunk_type(self._stream_cls),
response=self.http_response,
client=cast(Any, self._client),
),
)
stream_cls = cast("type[Stream[Any]] | type[AsyncStream[Any]] | None", self._client._default_stream_cls)
if stream_cls is None:
raise MissingStreamClassError()
return cast(
R,
stream_cls(
cast_to=self._cast_to,
response=self.http_response,
client=cast(Any, self._client),
),
)
cast_to = to if to is not None else self._cast_to
# unwrap `Annotated[T, ...]` -> `T`
if is_annotated_type(cast_to):
cast_to = extract_type_arg(cast_to, 0)
if cast_to is NoneType:
return cast(R, None)
response = self.http_response
if cast_to == str:
return cast(R, response.text)
if cast_to == bytes:
return cast(R, response.content)
if cast_to == int:
return cast(R, int(response.text))
if cast_to == float:
return cast(R, float(response.text))
if cast_to == bool:
return cast(R, response.text.lower() == "true")
origin = get_origin(cast_to) or cast_to
# handle the legacy binary response case
if inspect.isclass(cast_to) and cast_to.__name__ == "HttpxBinaryResponseContent":
return cast(R, cast_to(response)) # type: ignore
if origin == APIResponse:
raise RuntimeError("Unexpected state - cast_to is `APIResponse`")
if inspect.isclass(origin) and issubclass(origin, httpx.Response):
# Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response
# and pass that class to our request functions. We cannot change the variance to be either
# covariant or contravariant as that makes our usage of ResponseT illegal. We could construct
# the response class ourselves but that is something that should be supported directly in httpx
# as it would be easy to incorrectly construct the Response object due to the multitude of arguments.
if cast_to != httpx.Response:
raise ValueError("Subclasses of httpx.Response cannot be passed to `cast_to`")
return cast(R, response)
if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel):
raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`")
if (
cast_to is not object
and not origin is list
and not origin is dict
and not origin is Union
and not issubclass(origin, BaseModel)
):
raise RuntimeError(
f"Unsupported type, expected {cast_to} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}."
)
# split is required to handle cases where additional information is included
# in the response, e.g. application/json; charset=utf-8
content_type, *_ = response.headers.get("content-type", "*").split(";")
if content_type != "application/json":
if is_basemodel(cast_to):
try:
data = response.json()
except Exception as exc:
log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc)
else:
return self._client._process_response_data(
data=data,
cast_to=cast_to, # type: ignore
response=response,
)
if self._client._strict_response_validation:
raise APIResponseValidationError(
response=response,
message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.",
body=response.text,
)
# If the API responds with content that isn't JSON then we just return
# the (decoded) text without performing any parsing so that you can still
# handle the response however you need to.
return response.text # type: ignore
data = response.json()
return self._client._process_response_data(
data=data,
cast_to=cast_to, # type: ignore
response=response,
)
| |
176413
|
def _parse(self, *, to: type[_T] | None = None) -> R | _T:
# unwrap `Annotated[T, ...]` -> `T`
if to and is_annotated_type(to):
to = extract_type_arg(to, 0)
if self._stream:
if to:
if not is_stream_class_type(to):
raise TypeError(f"Expected custom parse type to be a subclass of {Stream} or {AsyncStream}")
return cast(
_T,
to(
cast_to=extract_stream_chunk_type(
to,
failure_message="Expected custom stream type to be passed with a type argument, e.g. Stream[ChunkType]",
),
response=self.http_response,
client=cast(Any, self._client),
),
)
if self._stream_cls:
return cast(
R,
self._stream_cls(
cast_to=extract_stream_chunk_type(self._stream_cls),
response=self.http_response,
client=cast(Any, self._client),
),
)
stream_cls = cast("type[Stream[Any]] | type[AsyncStream[Any]] | None", self._client._default_stream_cls)
if stream_cls is None:
raise MissingStreamClassError()
return cast(
R,
stream_cls(
cast_to=self._cast_to,
response=self.http_response,
client=cast(Any, self._client),
),
)
cast_to = to if to is not None else self._cast_to
# unwrap `Annotated[T, ...]` -> `T`
if is_annotated_type(cast_to):
cast_to = extract_type_arg(cast_to, 0)
if cast_to is NoneType:
return cast(R, None)
response = self.http_response
if cast_to == str:
return cast(R, response.text)
if cast_to == int:
return cast(R, int(response.text))
if cast_to == float:
return cast(R, float(response.text))
if cast_to == bool:
return cast(R, response.text.lower() == "true")
origin = get_origin(cast_to) or cast_to
if inspect.isclass(origin) and issubclass(origin, HttpxBinaryResponseContent):
return cast(R, cast_to(response)) # type: ignore
if origin == LegacyAPIResponse:
raise RuntimeError("Unexpected state - cast_to is `APIResponse`")
if inspect.isclass(origin) and issubclass(origin, httpx.Response):
# Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response
# and pass that class to our request functions. We cannot change the variance to be either
# covariant or contravariant as that makes our usage of ResponseT illegal. We could construct
# the response class ourselves but that is something that should be supported directly in httpx
# as it would be easy to incorrectly construct the Response object due to the multitude of arguments.
if cast_to != httpx.Response:
raise ValueError("Subclasses of httpx.Response cannot be passed to `cast_to`")
return cast(R, response)
if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel):
raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`")
if (
cast_to is not object
and not origin is list
and not origin is dict
and not origin is Union
and not issubclass(origin, BaseModel)
):
raise RuntimeError(
f"Unsupported type, expected {cast_to} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}."
)
# split is required to handle cases where additional information is included
# in the response, e.g. application/json; charset=utf-8
content_type, *_ = response.headers.get("content-type", "*").split(";")
if content_type != "application/json":
if is_basemodel(cast_to):
try:
data = response.json()
except Exception as exc:
log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc)
else:
return self._client._process_response_data(
data=data,
cast_to=cast_to, # type: ignore
response=response,
)
if self._client._strict_response_validation:
raise APIResponseValidationError(
response=response,
message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.",
body=response.text,
)
# If the API responds with content that isn't JSON then we just return
# the (decoded) text without performing any parsing so that you can still
# handle the response however you need to.
return response.text # type: ignore
data = response.json()
return self._client._process_response_data(
data=data,
cast_to=cast_to, # type: ignore
response=response,
)
@override
def __repr__(self) -> str:
return f"<APIResponse [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_to}>"
class MissingStreamClassError(TypeError):
def __init__(self) -> None:
super().__init__(
"The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `openai._streaming` for reference",
)
def to_raw_response_wrapper(func: Callable[P, R]) -> Callable[P, LegacyAPIResponse[R]]:
"""Higher order function that takes one of our bound API methods and wraps it
to support returning the raw `APIResponse` object directly.
"""
@functools.wraps(func)
def wrapped(*args: P.args, **kwargs: P.kwargs) -> LegacyAPIResponse[R]:
extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
extra_headers[RAW_RESPONSE_HEADER] = "true"
kwargs["extra_headers"] = extra_headers
return cast(LegacyAPIResponse[R], func(*args, **kwargs))
return wrapped
def async_to_raw_response_wrapper(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[LegacyAPIResponse[R]]]:
"""Higher order function that takes one of our bound API methods and wraps it
to support returning the raw `APIResponse` object directly.
"""
@functools.wraps(func)
async def wrapped(*args: P.args, **kwargs: P.kwargs) -> LegacyAPIResponse[R]:
extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
extra_headers[RAW_RESPONSE_HEADER] = "true"
kwargs["extra_headers"] = extra_headers
return cast(LegacyAPIResponse[R], await func(*args, **kwargs))
return wrapped
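# A minimal usage sketch (assuming the public `openai` client API; not part of the
# module above). These wrappers back the `.with_raw_response` accessors: they set
# the raw-response header so the call returns a `LegacyAPIResponse`, giving access
# to the underlying HTTP data while still allowing the typed model to be parsed.
def _example_with_raw_response() -> None:
    from openai import OpenAI

    client = OpenAI()
    raw = client.chat.completions.with_raw_response.create(
        model="gpt-4o-mini",  # illustrative model name
        messages=[{"role": "user", "content": "Say hello"}],
    )
    print(raw.headers.get("x-request-id"))  # raw httpx headers are available
    completion = raw.parse()  # parse into the usual ChatCompletion model
    print(completion.choices[0].message.content)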
| |
176427
|
class BaseModel(pydantic.BaseModel):
if PYDANTIC_V2:
model_config: ClassVar[ConfigDict] = ConfigDict(
extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true"))
)
else:
@property
@override
def model_fields_set(self) -> set[str]:
# a forwards-compat shim for pydantic v2
return self.__fields_set__ # type: ignore
class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated]
extra: Any = pydantic.Extra.allow # type: ignore
@override
def __repr_args__(self) -> ReprArgs:
# we don't want these attributes to be included when something like `rich.print` is used
return [arg for arg in super().__repr_args__() if arg[0] not in {"_request_id", "__exclude_fields__"}]
if TYPE_CHECKING:
_request_id: Optional[str] = None
"""The ID of the request, returned via the X-Request-ID header. Useful for debugging requests and reporting issues to OpenAI.
This will **only** be set for the top-level response object, it will not be defined for nested objects. For example:
```py
completion = await client.chat.completions.create(...)
completion._request_id # req_id_xxx
completion.usage._request_id # raises `AttributeError`
```
Note: unlike other properties that use an `_` prefix, this property
*is* public. Unless documented otherwise, all other `_` prefix properties,
methods and modules are *private*.
"""
def to_dict(
self,
*,
mode: Literal["json", "python"] = "python",
use_api_names: bool = True,
exclude_unset: bool = True,
exclude_defaults: bool = False,
exclude_none: bool = False,
warnings: bool = True,
) -> dict[str, object]:
"""Recursively generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
By default, fields that were not set by the API will not be included,
and keys will match the API response, *not* the property names from the model.
For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property,
the output will use the `"fooBar"` key (unless `use_api_names=False` is passed).
Args:
mode:
If mode is 'json', the dictionary will only contain JSON serializable types. e.g. `datetime` will be turned into a string, `"2024-3-22T18:11:19.117000Z"`.
If mode is 'python', the dictionary may contain any Python objects. e.g. `datetime(2024, 3, 22)`
use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`.
exclude_unset: Whether to exclude fields that have not been explicitly set.
exclude_defaults: Whether to exclude fields that are set to their default value from the output.
exclude_none: Whether to exclude fields that have a value of `None` from the output.
warnings: Whether to log warnings when invalid fields are encountered. This is only supported in Pydantic v2.
"""
return self.model_dump(
mode=mode,
by_alias=use_api_names,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
warnings=warnings,
)
def to_json(
self,
*,
indent: int | None = 2,
use_api_names: bool = True,
exclude_unset: bool = True,
exclude_defaults: bool = False,
exclude_none: bool = False,
warnings: bool = True,
) -> str:
"""Generates a JSON string representing this model as it would be received from or sent to the API (but with indentation).
By default, fields that were not set by the API will not be included,
and keys will match the API response, *not* the property names from the model.
For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property,
the output will use the `"fooBar"` key (unless `use_api_names=False` is passed).
Args:
indent: Indentation to use in the JSON output. If `None` is passed, the output will be compact. Defaults to `2`
use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`.
exclude_unset: Whether to exclude fields that have not been explicitly set.
exclude_defaults: Whether to exclude fields that have the default value.
exclude_none: Whether to exclude fields that have a value of `None`.
warnings: Whether to show any warnings that occurred during serialization. This is only supported in Pydantic v2.
"""
return self.model_dump_json(
indent=indent,
by_alias=use_api_names,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
warnings=warnings,
)
@override
def __str__(self) -> str:
# mypy complains about an invalid self arg
return f'{self.__repr_name__()}({self.__repr_str__(", ")})' # type: ignore[misc]
# Override the 'construct' method in a way that supports recursive parsing without validation.
# Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836.
@classmethod
@override
def construct(
cls: Type[ModelT],
_fields_set: set[str] | None = None,
**values: object,
) -> ModelT:
m = cls.__new__(cls)
fields_values: dict[str, object] = {}
config = get_model_config(cls)
populate_by_name = (
config.allow_population_by_field_name
if isinstance(config, _ConfigProtocol)
else config.get("populate_by_name")
)
if _fields_set is None:
_fields_set = set()
model_fields = get_model_fields(cls)
for name, field in model_fields.items():
key = field.alias
if key is None or (key not in values and populate_by_name):
key = name
if key in values:
fields_values[name] = _construct_field(value=values[key], field=field, key=key)
_fields_set.add(name)
else:
fields_values[name] = field_get_default(field)
_extra = {}
for key, value in values.items():
if key not in model_fields:
if PYDANTIC_V2:
_extra[key] = value
else:
_fields_set.add(key)
fields_values[key] = value
object.__setattr__(m, "__dict__", fields_values)
if PYDANTIC_V2:
# these properties are copied from Pydantic's `model_construct()` method
object.__setattr__(m, "__pydantic_private__", None)
object.__setattr__(m, "__pydantic_extra__", _extra)
object.__setattr__(m, "__pydantic_fields_set__", _fields_set)
else:
# init_private_attributes() does not exist in v2
m._init_private_attributes() # type: ignore
# copied from Pydantic v1's `construct()` method
object.__setattr__(m, "__fields_set__", _fields_set)
return m
if not TYPE_CHECKING:
# type checkers incorrectly complain about this assignment
# because the type signatures are technically different
# although not in practice
model_construct = construct
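# A minimal sketch (the `Completion` model and its fields are invented for
# illustration) of what `construct()` is for: building model instances from API
# data without running validation, while keeping unknown keys as extra attributes.
def _example_construct() -> None:
    from typing import Optional

    class Completion(BaseModel):
        id: str
        created: Optional[int] = None

    obj = Completion.construct(id="cmpl-123", unknown_key="kept")
    print(obj.id)           # 'cmpl-123'
    print(obj.unknown_key)  # extra keys are preserved rather than rejected
    print(obj.to_dict())    # keys follow the API names; unset fields are omitted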
| |
176465
|
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Union, Iterable
from typing_extensions import Literal, Required, TypedDict
from .embedding_model import EmbeddingModel
__all__ = ["EmbeddingCreateParams"]
class EmbeddingCreateParams(TypedDict, total=False):
input: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]]]]
"""Input text to embed, encoded as a string or array of tokens.
To embed multiple inputs in a single request, pass an array of strings or array
of token arrays. The input must not exceed the max input tokens for the model
(8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any
array must be 2048 dimensions or less.
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
for counting tokens.
"""
model: Required[Union[str, EmbeddingModel]]
"""ID of the model to use.
You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models/overview) for
descriptions of them.
"""
dimensions: int
"""The number of dimensions the resulting output embeddings should have.
Only supported in `text-embedding-3` and later models.
"""
encoding_format: Literal["float", "base64"]
"""The format to return the embeddings in.
Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).
"""
user: str
"""
A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
"""
| |
176819
|
from typing import Any
from typing_extensions import ClassVar
import pydantic
from .. import _models
from .._compat import PYDANTIC_V2, ConfigDict
class BaseModel(_models.BaseModel):
if PYDANTIC_V2:
model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore", arbitrary_types_allowed=True)
else:
class Config(pydantic.BaseConfig): # type: ignore
extra: Any = pydantic.Extra.ignore # type: ignore
arbitrary_types_allowed: bool = True
| |
176842
|
from __future__ import annotations
import inspect
from typing import Any, TypeVar
from typing_extensions import TypeGuard
import pydantic
from .._types import NOT_GIVEN
from .._utils import is_dict as _is_dict, is_list
from .._compat import PYDANTIC_V2, model_json_schema
_T = TypeVar("_T")
def to_strict_json_schema(model: type[pydantic.BaseModel] | pydantic.TypeAdapter[Any]) -> dict[str, Any]:
if inspect.isclass(model) and is_basemodel_type(model):
schema = model_json_schema(model)
elif PYDANTIC_V2 and isinstance(model, pydantic.TypeAdapter):
schema = model.json_schema()
else:
raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {model}")
return _ensure_strict_json_schema(schema, path=(), root=schema)
def _ensure_strict_json_schema(
json_schema: object,
*,
path: tuple[str, ...],
root: dict[str, object],
) -> dict[str, Any]:
"""Mutates the given JSON schema to ensure it conforms to the `strict` standard
that the API expects.
"""
if not is_dict(json_schema):
raise TypeError(f"Expected {json_schema} to be a dictionary; path={path}")
defs = json_schema.get("$defs")
if is_dict(defs):
for def_name, def_schema in defs.items():
_ensure_strict_json_schema(def_schema, path=(*path, "$defs", def_name), root=root)
definitions = json_schema.get("definitions")
if is_dict(definitions):
for definition_name, definition_schema in definitions.items():
_ensure_strict_json_schema(definition_schema, path=(*path, "definitions", definition_name), root=root)
typ = json_schema.get("type")
if typ == "object" and "additionalProperties" not in json_schema:
json_schema["additionalProperties"] = False
# object types
# { 'type': 'object', 'properties': { 'a': {...} } }
properties = json_schema.get("properties")
if is_dict(properties):
json_schema["required"] = [prop for prop in properties.keys()]
json_schema["properties"] = {
key: _ensure_strict_json_schema(prop_schema, path=(*path, "properties", key), root=root)
for key, prop_schema in properties.items()
}
# arrays
# { 'type': 'array', 'items': {...} }
items = json_schema.get("items")
if is_dict(items):
json_schema["items"] = _ensure_strict_json_schema(items, path=(*path, "items"), root=root)
# unions
any_of = json_schema.get("anyOf")
if is_list(any_of):
json_schema["anyOf"] = [
_ensure_strict_json_schema(variant, path=(*path, "anyOf", str(i)), root=root)
for i, variant in enumerate(any_of)
]
# intersections
all_of = json_schema.get("allOf")
if is_list(all_of):
if len(all_of) == 1:
json_schema.update(_ensure_strict_json_schema(all_of[0], path=(*path, "allOf", "0"), root=root))
json_schema.pop("allOf")
else:
json_schema["allOf"] = [
_ensure_strict_json_schema(entry, path=(*path, "allOf", str(i)), root=root)
for i, entry in enumerate(all_of)
]
# strip `None` defaults as there's no meaningful distinction here
# the schema will still be `nullable` and the model will default
# to using `None` anyway
if json_schema.get("default", NOT_GIVEN) is None:
json_schema.pop("default")
# we can't use `$ref`s if there are also other properties defined, e.g.
# `{"$ref": "...", "description": "my description"}`
#
# so we unravel the ref
# `{"type": "string", "description": "my description"}`
ref = json_schema.get("$ref")
if ref and has_more_than_n_keys(json_schema, 1):
assert isinstance(ref, str), f"Received non-string $ref - {ref}"
resolved = resolve_ref(root=root, ref=ref)
if not is_dict(resolved):
raise ValueError(f"Expected `$ref: {ref}` to resolve to a dictionary but got {resolved}")
# properties from the json schema take priority over the ones on the `$ref`
json_schema.update({**resolved, **json_schema})
json_schema.pop("$ref")
return json_schema
def resolve_ref(*, root: dict[str, object], ref: str) -> object:
if not ref.startswith("#/"):
raise ValueError(f"Unexpected $ref format {ref!r}; Does not start with #/")
path = ref[2:].split("/")
resolved = root
for key in path:
value = resolved[key]
assert is_dict(value), f"encountered non-dictionary entry while resolving {ref} - {resolved}"
resolved = value
return resolved
def is_basemodel_type(typ: type) -> TypeGuard[type[pydantic.BaseModel]]:
return issubclass(typ, pydantic.BaseModel)
def is_dataclass_like_type(typ: type) -> bool:
"""Returns True if the given type likely used `@pydantic.dataclass`"""
return hasattr(typ, "__pydantic_config__")
def is_dict(obj: object) -> TypeGuard[dict[str, object]]:
# just pretend that we know there are only `str` keys
# as that check is not worth the performance cost
return _is_dict(obj)
def has_more_than_n_keys(obj: dict[str, object], n: int) -> bool:
i = 0
for _ in obj.keys():
i += 1
if i > n:
return True
return False
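# A minimal usage sketch (the `Location` model is defined only for this example)
# showing the effect of `to_strict_json_schema`: every object schema gains
# `additionalProperties: false` and lists all of its properties as required.
def _example_strict_schema() -> None:
    from typing import Optional

    class Location(pydantic.BaseModel):
        city: str
        country: Optional[str] = None

    schema = to_strict_json_schema(Location)
    print(schema["additionalProperties"])  # False
    print(schema["required"])              # ['city', 'country']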
| |
177081
|
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def test_small_model_pt(self):
text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
# Using `do_sample=False` to force deterministic output
outputs = text_generator("This is a test", do_sample=False)
self.assertEqual(
outputs,
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
)
outputs = text_generator(["This is a test", "This is a second test"])
self.assertEqual(
outputs,
[
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
],
)
outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
self.assertEqual(
outputs,
[
{"generated_token_ids": ANY(list)},
{"generated_token_ids": ANY(list)},
],
)
## -- test tokenizer_kwargs
test_str = "testing tokenizer kwargs. using truncation must result in a different generation."
input_len = len(text_generator.tokenizer(test_str)["input_ids"])
output_str, output_str_with_truncation = (
text_generator(test_str, do_sample=False, return_full_text=False, min_new_tokens=1)[0]["generated_text"],
text_generator(
test_str,
do_sample=False,
return_full_text=False,
min_new_tokens=1,
truncation=True,
max_length=input_len + 1,
)[0]["generated_text"],
)
assert output_str != output_str_with_truncation # results must be different because one had truncation
## -- test kwargs for preprocess_params
outputs = text_generator("This is a test", do_sample=False, add_special_tokens=False, padding=False)
self.assertEqual(
outputs,
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
)
# -- what is the point of this test? padding is hardcoded False in the pipeline anyway
text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
text_generator.tokenizer.pad_token = "<pad>"
outputs = text_generator(
["This is a test", "This is a second test"],
do_sample=True,
num_return_sequences=2,
batch_size=2,
return_tensors=True,
)
self.assertEqual(
outputs,
[
[
{"generated_token_ids": ANY(list)},
{"generated_token_ids": ANY(list)},
],
[
{"generated_token_ids": ANY(list)},
{"generated_token_ids": ANY(list)},
],
],
)
@require_torch
def test_small_chat_model_pt(self):
text_generator = pipeline(
task="text-generation", model="hf-internal-testing/tiny-gpt2-with-chatml-template", framework="pt"
)
# Using `do_sample=False` to force deterministic output
chat1 = [
{"role": "system", "content": "This is a system message."},
{"role": "user", "content": "This is a test"},
]
chat2 = [
{"role": "system", "content": "This is a system message."},
{"role": "user", "content": "This is a second test"},
]
outputs = text_generator(chat1, do_sample=False, max_new_tokens=10)
expected_chat1 = chat1 + [
{
"role": "assistant",
"content": " factors factors factors factors factors factors factors factors factors factors",
}
]
self.assertEqual(
outputs,
[
{"generated_text": expected_chat1},
],
)
outputs = text_generator([chat1, chat2], do_sample=False, max_new_tokens=10)
expected_chat2 = chat2 + [
{
"role": "assistant",
"content": " stairs stairs stairs stairs stairs stairs stairs stairs stairs stairs",
}
]
self.assertEqual(
outputs,
[
[{"generated_text": expected_chat1}],
[{"generated_text": expected_chat2}],
],
)
@require_torch
def test_small_chat_model_continue_final_message(self):
# Here we check that passing a chat that ends in an assistant message is handled correctly
# by continuing the final message rather than starting a new one
text_generator = pipeline(
task="text-generation", model="hf-internal-testing/tiny-gpt2-with-chatml-template", framework="pt"
)
# Using `do_sample=False` to force deterministic output
chat1 = [
{"role": "system", "content": "This is a system message."},
{"role": "user", "content": "This is a test"},
{"role": "assistant", "content": "This is"},
]
outputs = text_generator(chat1, do_sample=False, max_new_tokens=10)
# Assert that we continued the last message and there isn't a sneaky <|im_end|>
self.assertEqual(
outputs,
[
{
"generated_text": [
{"role": "system", "content": "This is a system message."},
{"role": "user", "content": "This is a test"},
{
"role": "assistant",
"content": "This is stairs stairs stairs stairs stairs stairs stairs stairs stairs stairs",
},
]
}
],
)
@require_torch
def test_small_chat_model_continue_final_message_override(self):
# Here we check that passing a chat that ends in an assistant message is handled correctly
# by continuing the final message rather than starting a new one
text_generator = pipeline(
task="text-generation", model="hf-internal-testing/tiny-gpt2-with-chatml-template", framework="pt"
)
# Using `do_sample=False` to force deterministic output
chat1 = [
{"role": "system", "content": "This is a system message."},
{"role": "user", "content": "This is a test"},
]
outputs = text_generator(chat1, do_sample=False, max_new_tokens=10, continue_final_message=True)
# Assert that we continued the last message and there isn't a sneaky <|im_end|>
self.assertEqual(
outputs,
[
{
"generated_text": [
{"role": "system", "content": "This is a system message."},
{
"role": "user",
"content": "This is a test stairs stairs stairs stairs stairs stairs stairs stairs stairs stairs",
},
]
}
],
)
@require_torch
def test_small_chat_model_with_dataset_pt(self):
from torch.utils.data import Dataset
from transformers.pipelines.pt_utils import KeyDataset
class MyDataset(Dataset):
data = [
[
{"role": "system", "content": "This is a system message."},
{"role": "user", "content": "This is a test"},
],
]
def __len__(self):
return 1
def __getitem__(self, i):
return {"text": self.data[i]}
text_generator = pipeline(
task="text-generation", model="hf-internal-testing/tiny-gpt2-with-chatml-template", framework="pt"
)
dataset = MyDataset()
key_dataset = KeyDataset(dataset, "text")
for outputs in text_generator(key_dataset, do_sample=False, max_new_tokens=10):
expected_chat = dataset.data[0] + [
{
"role": "assistant",
"content": " factors factors factors factors factors factors factors factors factors factors",
}
]
self.assertEqual(
outputs,
[
{"generated_text": expected_chat},
],
)
@require_tf
def test
| |
177082
|
_model_tf(self):
text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
# Using `do_sample=False` to force deterministic output
outputs = text_generator("This is a test", do_sample=False)
self.assertEqual(
outputs,
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
)
outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
self.assertEqual(
outputs,
[
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
],
)
@require_tf
def test_small_chat_model_tf(self):
text_generator = pipeline(
task="text-generation", model="hf-internal-testing/tiny-gpt2-with-chatml-template", framework="tf"
)
# Using `do_sample=False` to force deterministic output
chat1 = [
{"role": "system", "content": "This is a system message."},
{"role": "user", "content": "This is a test"},
]
chat2 = [
{"role": "system", "content": "This is a system message."},
{"role": "user", "content": "This is a second test"},
]
outputs = text_generator(chat1, do_sample=False, max_new_tokens=10)
expected_chat1 = chat1 + [
{
"role": "assistant",
"content": " factors factors factors factors factors factors factors factors factors factors",
}
]
self.assertEqual(
outputs,
[
{"generated_text": expected_chat1},
],
)
outputs = text_generator([chat1, chat2], do_sample=False, max_new_tokens=10)
expected_chat2 = chat2 + [
{
"role": "assistant",
"content": " stairs stairs stairs stairs stairs stairs stairs stairs stairs stairs",
}
]
self.assertEqual(
outputs,
[
[{"generated_text": expected_chat1}],
[{"generated_text": expected_chat2}],
],
)
def get_test_pipeline(self, model, tokenizer, processor, torch_dtype="float32"):
text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer, torch_dtype=torch_dtype)
return text_generator, ["This is a test", "Another test"]
def test_stop_sequence_stopping_criteria(self):
prompt = """Hello I believe in"""
text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
output = text_generator(prompt)
self.assertEqual(
output,
[{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
)
output = text_generator(prompt, stop_sequence=" fe")
self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
def run_pipeline_test(self, text_generator, _):
model = text_generator.model
tokenizer = text_generator.tokenizer
outputs = text_generator("This is a test")
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
outputs = text_generator("This is a test", return_full_text=False)
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
self.assertNotIn("This is a test", outputs[0]["generated_text"])
text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
outputs = text_generator("This is a test")
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
self.assertNotIn("This is a test", outputs[0]["generated_text"])
outputs = text_generator("This is a test", return_full_text=True)
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
self.assertEqual(
outputs,
[
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
],
)
if text_generator.tokenizer.pad_token is not None:
outputs = text_generator(
["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
)
self.assertEqual(
outputs,
[
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
],
)
with self.assertRaises(ValueError):
outputs = text_generator("test", return_full_text=True, return_text=True)
with self.assertRaises(ValueError):
outputs = text_generator("test", return_full_text=True, return_tensors=True)
with self.assertRaises(ValueError):
outputs = text_generator("test", return_text=True, return_tensors=True)
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
outputs = text_generator("")
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
else:
with self.assertRaises((ValueError, AssertionError)):
outputs = text_generator("", add_special_tokens=False)
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
self.skipTest(reason="TF generation does not support max_new_tokens")
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = [
"RwkvForCausalLM",
"XGLMForCausalLM",
"GPTNeoXForCausalLM",
"GPTNeoXJapaneseForCausalLM",
"FuyuForCausalLM",
]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
text_generator("This is a test" * 500, max_new_tokens=20)
outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
# Hole strategy cannot work
with self.assertRaises(ValueError):
text_generator(
"This is a test" * 500,
handle_long_generation="hole",
max_new_tokens=tokenizer.model_max_length + 10,
)
@require_torch
@require_accelerate
| |
177098
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def get_test_pipeline(self, model, tokenizer, processor, torch_dtype="float32"):
generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer, torch_dtype=torch_dtype)
return generator, ["Something to write", "Something else"]
def run_pipeline_test(self, generator, _):
outputs = generator("Something there")
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
# These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))
outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
self.assertEqual(
outputs,
[
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
],
)
outputs = generator(
["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
)
self.assertEqual(
outputs,
[
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
],
)
with self.assertRaises(ValueError):
generator(4)
@require_torch
def test_small_model_pt(self):
generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
# do_sample=False necessary for reproducibility
outputs = generator("Something there", do_sample=False)
self.assertEqual(outputs, [{"generated_text": ""}])
num_return_sequences = 3
outputs = generator(
"Something there",
num_return_sequences=num_return_sequences,
num_beams=num_return_sequences,
)
target_outputs = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(outputs, target_outputs)
outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
self.assertEqual(
outputs,
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
)
generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
generator.tokenizer.pad_token = "<pad>"
outputs = generator(
["This is a test", "This is a second test"],
do_sample=True,
num_return_sequences=2,
batch_size=2,
return_tensors=True,
)
self.assertEqual(
outputs,
[
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
],
)
@require_tf
def test_small_model_tf(self):
generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
# do_sample=False necessary for reproducibility
outputs = generator("Something there", do_sample=False)
self.assertEqual(outputs, [{"generated_text": ""}])
| |
177444
|
@slow
@require_torch_gpu
@require_bitsandbytes
@require_read_token
def test_11b_model_integration_multi_image_generate(self):
processor = AutoProcessor.from_pretrained(self.instruct_model_checkpoint)
# Prepare inputs
image1 = Image.open(requests.get("https://llava-vl.github.io/static/images/view.jpg", stream=True).raw)
image2 = Image.open(requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw)
conversation = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What’s shown in this image?"},
],
},
{
"role": "assistant",
"content": [
{"type": "text", "text": "This image shows a long wooden dock extending out into a lake."}
],
},
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What about this one, what do you see here? Can you describe in detail?"},
],
},
]
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
inputs = processor(text=prompt, images=[[image1, image2]], return_tensors="pt").to(torch_device)
prompt_len = inputs["input_ids"].shape[-1]
# Load model in 4 bit
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
model = MllamaForConditionalGeneration.from_pretrained(
self.instruct_model_checkpoint, quantization_config=quantization_config
)
output = model.generate(**inputs, do_sample=False, max_new_tokens=25)
# Check first output
generated_output = output[0][prompt_len:]
decoded_output = processor.decode(generated_output, skip_special_tokens=False)
# the model should respond about the "stop sign", but it responds about the "dock";
# this happens only in the quantized version, bfloat16 works fine
expected_output = "This image shows a long wooden dock extending out into a lake. The dock is made of wooden planks and has a railing"
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
| |
177519
|
# coding=utf-8
# Copyright 2021, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Marian model."""
import tempfile
import unittest
from huggingface_hub.hf_api import list_models
from transformers import MarianConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
require_torch_fp16,
slow,
torch_device,
)
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
MarianModel,
MarianMTModel,
TranslationPipeline,
)
from transformers.models.marian.convert_marian_to_pytorch import (
ORG_NAME,
convert_hf_name_to_opus_name,
convert_opus_name_to_hf_name,
)
from transformers.models.marian.modeling_marian import (
MarianDecoder,
MarianEncoder,
MarianForCausalLM,
shift_tokens_right,
)
def prepare_marian_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
if decoder_head_mask is None:
decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
if cross_attn_head_mask is None:
cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class MarianModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
decoder_start_token_id=3,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.decoder_start_token_id = decoder_start_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(
3,
)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return MarianConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = MarianModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
head_mask = inputs_dict["head_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = MarianModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = MarianEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = MarianDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
| |
177521
|
@require_torch
@require_sentencepiece
@require_tokenizers
class MarianIntegrationTest(unittest.TestCase):
src = "en"
tgt = "de"
src_text = [
"I am a small frog.",
"Now I can forget the 100 words of german that I know.",
"Tom asked his teacher for advice.",
"That's how I would do it.",
"Tom really admired Mary's courage.",
"Turn around and close your eyes.",
]
expected_text = [
"Ich bin ein kleiner Frosch.",
"Jetzt kann ich die 100 Wörter des Deutschen vergessen, die ich kenne.",
"Tom bat seinen Lehrer um Rat.",
"So würde ich das machen.",
"Tom bewunderte Marias Mut wirklich.",
"Drehen Sie sich um und schließen Sie die Augen.",
]
# ^^ actual C++ output differs slightly: (1) des Deutschen removed, (2) ""-> "O", (3) tun -> machen
@classmethod
def setUpClass(cls) -> None:
cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}"
return cls
@cached_property
def tokenizer(self):
return AutoTokenizer.from_pretrained(self.model_name)
@property
def eos_token_id(self) -> int:
return self.tokenizer.eos_token_id
@cached_property
def model(self):
model: MarianMTModel = AutoModelWithLMHead.from_pretrained(self.model_name).to(torch_device)
c = model.config
self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]])
self.assertEqual(c.max_length, 512)
self.assertEqual(c.decoder_start_token_id, c.pad_token_id)
if torch_device == "cuda":
return model.half()
else:
return model
def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
generated_words = self.translate_src_text(**tokenizer_kwargs)
self.assertListEqual(self.expected_text, generated_words)
def translate_src_text(self, **tokenizer_kwargs):
model_inputs = self.tokenizer(self.src_text, padding=True, return_tensors="pt", **tokenizer_kwargs).to(
torch_device
)
self.assertEqual(self.model.device, model_inputs.input_ids.device)
generated_ids = self.model.generate(
model_inputs.input_ids,
attention_mask=model_inputs.attention_mask,
num_beams=2,
max_length=128,
renormalize_logits=True, # Marian should always renormalize its logits. See #25459
)
generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
return generated_words
@require_sentencepiece
@require_tokenizers
class TestMarian_EN_DE_More(MarianIntegrationTest):
@slow
def test_forward(self):
src, tgt = ["I am a small frog"], ["Ich bin ein kleiner Frosch."]
expected_ids = [38, 121, 14, 697, 38848, 0]
model_inputs = self.tokenizer(src, text_target=tgt, return_tensors="pt").to(torch_device)
self.assertListEqual(expected_ids, model_inputs.input_ids[0].tolist())
desired_keys = {
"input_ids",
"attention_mask",
"labels",
}
self.assertSetEqual(desired_keys, set(model_inputs.keys()))
model_inputs["decoder_input_ids"] = shift_tokens_right(
model_inputs.labels, self.tokenizer.pad_token_id, self.model.config.decoder_start_token_id
)
model_inputs["return_dict"] = True
model_inputs["use_cache"] = False
with torch.no_grad():
outputs = self.model(**model_inputs)
max_indices = outputs.logits.argmax(-1)
self.tokenizer.batch_decode(max_indices)
def test_unk_support(self):
t = self.tokenizer
ids = t(["||"], return_tensors="pt").to(torch_device).input_ids[0].tolist()
expected = [t.unk_token_id, t.unk_token_id, t.eos_token_id]
self.assertEqual(expected, ids)
def test_pad_not_split(self):
input_ids_w_pad = self.tokenizer(["I am a small frog <pad>"], return_tensors="pt").input_ids[0].tolist()
expected_w_pad = [38, 121, 14, 697, 38848, self.tokenizer.pad_token_id, 0] # pad
self.assertListEqual(expected_w_pad, input_ids_w_pad)
@slow
def test_batch_generation_en_de(self):
self._assert_generated_batch_equal_expected()
def test_auto_config(self):
config = AutoConfig.from_pretrained(self.model_name)
self.assertIsInstance(config, MarianConfig)
@require_sentencepiece
@require_tokenizers
class TestMarian_EN_FR(MarianIntegrationTest):
src = "en"
tgt = "fr"
src_text = [
"I am a small frog.",
"Now I can forget the 100 words of german that I know.",
]
expected_text = [
"Je suis une petite grenouille.",
"Maintenant, je peux oublier les 100 mots d'allemand que je connais.",
]
@slow
def test_batch_generation_en_fr(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
class TestMarian_FR_EN(MarianIntegrationTest):
src = "fr"
tgt = "en"
src_text = [
"Donnez moi le micro.",
"Tom et Mary étaient assis à une table.", # Accents
]
expected_text = [
"Give me the microphone.",
"Tom and Mary were sitting at a table.",
]
@slow
def test_batch_generation_fr_en(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
class TestMarian_RU_FR(MarianIntegrationTest):
src = "ru"
tgt = "fr"
src_text = ["Он показал мне рукопись своей новой пьесы."]
expected_text = ["Il m'a montré le manuscrit de sa nouvelle pièce."]
@slow
def test_batch_generation_ru_fr(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
class TestMarian_MT_EN(MarianIntegrationTest):
"""Cover low resource/high perplexity setting. This breaks without adjust_logits_generation overwritten"""
src = "mt"
tgt = "en"
src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."]
expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."]
@slow
def test_batch_generation_mt_en(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
class TestMarian_en_zh(MarianIntegrationTest):
src = "en"
tgt = "zh"
src_text = ["My name is Wolfgang and I live in Berlin"]
expected_text = ["我叫沃尔夫冈 我住在柏林"]
@slow
def test_batch_generation_eng_zho(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
class TestMarian_en_ROMANCE(MarianIntegrationTest):
"""Multilingual on target side."""
src = "en"
tgt = "ROMANCE"
src_text = [
">>fr<< Don't spend so much time watching TV.",
">>pt<< Your message has been sent.",
">>es<< He's two years older than me.",
]
expected_text = [
"Ne passez pas autant de temps à regarder la télé.",
"A sua mensagem foi enviada.",
"Es dos años más viejo que yo.",
]
@slow
def test_batch_generation_en_ROMANCE_multi(self):
self._assert_generated_batch_equal_expected()
@slow
@require_torch
def test_pipeline(self):
pipeline = TranslationPipeline(self.model, self.tokenizer, framework="pt", device=torch_device)
output = pipeline(self.src_text)
self.assertEqual(self.expected_text, [x["translation_text"] for x in output])
@require_sentencepiece
@require_tokenizers
class TestMarian_FI_EN_V2(Ma
| |
179390
|
# Testing mixed int8 quantization

The following is a recipe for effectively debugging the `bitsandbytes` integration in Hugging Face `transformers`.
## Library requirements
+ `transformers>=4.22.0`
+ `accelerate>=0.12.0`
+ `bitsandbytes>=0.31.5`.
## Hardware requirements
The following instructions were tested with 2 NVIDIA Tesla T4 GPUs. To run `bitsandbytes` successfully, you need a GPU that supports 8-bit tensor cores. Note that Turing, Ampere or newer architectures (e.g. T4, RTX 20/30 series, A40-A100, A6000) should be supported.
## Virtual envs
```bash
conda create --name int8-testing python==3.8
pip install bitsandbytes>=0.31.5
pip install accelerate>=0.12.0
pip install transformers>=4.23.0
```
If `transformers>=4.23.0` has not been released yet, install it from source instead:
```bash
pip install git+https://github.com/huggingface/transformers.git
```
## Troubleshooting
A list of common errors:
### Torch does not correctly perform operations on the GPU
First, check that the following snippet runs without any error:
```py
import torch
vec = torch.randn(1, 2, 3).to(0)
```
If it fails, install torch using `conda`:
```bash
conda create --name int8-testing python==3.8
conda install pytorch torchvision torchaudio cudatoolkit=11.6 -c pytorch -c conda-forge
pip install bitsandbytes>=0.31.5
pip install accelerate>=0.12.0
pip install transformers>=4.23.0
```
For the latest PyTorch installation instructions, please see [this page](https://pytorch.org/get-started/locally/); afterwards the snippet above should work.
### `bitsandbytes operations are not supported under CPU!`
This happens when some `Linear` weights are placed on the CPU when using `accelerate`. Check `model.hf_device_map` carefully and make sure that no `Linear` module is assigned to the CPU. It is fine to have the last module (usually the lm_head) on the CPU.
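As a minimal sketch (assuming `model` has already been loaded with `device_map="auto"` and 8-bit weights), you can list the modules that ended up off the GPU like this:
```py
# Minimal sketch: list every module that accelerate placed on the CPU or disk.
# `model` is assumed to be a model loaded with device_map="auto" and load_in_8bit=True.
for module_name, device in model.hf_device_map.items():
    if device in ("cpu", "disk"):
        print(f"offloaded to {device}: {module_name}")
```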
### `To use the type as a Parameter, please correct the detach() semantics defined by __torch_dispatch__() implementation.`
Use the latest version of `accelerate` with a command such as: `pip install -U accelerate` and the problem should be solved.
### `Parameter has no attribute .CB`
Same solution as above.
### `RuntimeError: CUDA error: an illegal memory access was encountered ... consider passing CUDA_LAUNCH_BLOCKING=1`
Run your script by prepending `CUDA_LAUNCH_BLOCKING=1` and you should observe an error as described in the next section.
### `CUDA illegal memory error: an illegal memory access at line...`
Check the CUDA versions with:
```bash
nvcc --version
```
and confirm it is the same version as the one detected by `bitsandbytes`. If not, run:
```bash
ls -l $CONDA_PREFIX/lib/libcudart.so
```
or
```bash
ls -l $LD_LIBRARY_PATH
```
Check whether `libcudart.so` has a correct symlink set. Sometimes `nvcc` detects the correct CUDA version but `bitsandbytes` doesn't; in that case, make sure the symlink for `libcudart.so` points to the CUDA runtime that matches the detected version.
Here is an example of a badly configured CUDA installation:
`nvcc --version` gives:

which means that the detected CUDA version is 11.3 but `bitsandbytes` outputs:

First check:
```bash
echo $LD_LIBRARY_PATH
```
If it contains multiple paths separated by `:`, you have to make sure that the correct CUDA version is set by checking:
```bash
ls -l $path/libcudart.so
```
Run this for each path (`$path`) in the list.
If `LD_LIBRARY_PATH` contains only a single path, simply run
```bash
ls -l $LD_LIBRARY_PATH/libcudart.so
```
and you can see

If you see that the file is linked to the wrong CUDA version (here 10.2), find the correct location for `libcudart.so` (e.g. `find / -name libcudart.so`) and replace the environment variable `LD_LIBRARY_PATH` with the one containing the correct `libcudart.so` file.
| |
179391
|
# coding=utf-8
# Copyright 2022 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
apply_skip_if_not_implemented,
is_bitsandbytes_available,
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu_if_bnb_not_multi_backend_enabled,
require_torch_multi_gpu,
slow,
torch_device,
)
def get_some_linear_layer(model):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
elif model.config.model_type == "opt":
try:
return model.decoder.layers[0].fc1
except AttributeError:
# for AutoModelForCausalLM
return model.model.decoder.layers[0].fc1
else:
return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
"""Wraps a linear layer with LoRA-like adapter - Used for testing purposes only"""
def __init__(self, module: nn.Module, rank: int):
super().__init__()
self.module = module
self.adapter = nn.Sequential(
nn.Linear(module.in_features, rank, bias=False),
nn.Linear(rank, module.out_features, bias=False),
)
small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
nn.init.normal_(self.adapter[0].weight, std=small_std)
nn.init.zeros_(self.adapter[1].weight)
self.adapter.to(module.weight.device)
def forward(self, input, *args, **kwargs):
return self.module(input, *args, **kwargs) + self.adapter(input)
if is_bitsandbytes_available():
import bitsandbytes as bnb
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu_if_bnb_not_multi_backend_enabled
@slow
class Base4bitTest(unittest.TestCase):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (i.e. >1B parameters), otherwise the quantization may not work as expected
# Therefore we use only bloom-1b7 here to test our module
model_name = "bigscience/bloom-1b7"
# Constant values
EXPECTED_RELATIVE_DIFFERENCE = (
2.109659552692574 # This was obtained on a RTX Titan so the number might slightly change
)
input_text = "Hello my name is"
EXPECTED_OUTPUTS = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
MAX_NEW_TOKENS = 10
def setUp(self):
# Models and tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
@apply_skip_if_not_implemented
| |
180210
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Quantize 🤗 Transformers models
## `AutoGPTQ` Integration
🤗 Transformers has integrated the `optimum` API to perform GPTQ quantization on language models. You can load and quantize your models in 8, 4, 3 or even 2 bits without a big drop in performance and with faster inference speed! This is supported by most GPU hardware.
To learn more about quantized models, check out:
- the [GPTQ](https://arxiv.org/pdf/2210.17323.pdf) paper
- the `optimum` [guide](https://huggingface.co/docs/optimum/llm_quantization/usage_guides/quantization) on GPTQ quantization
- the [`AutoGPTQ`](https://github.com/PanQiWei/AutoGPTQ) library used as the backend
### Requirements
You need to have the following requirements installed to run the code below:
- Install the latest `AutoGPTQ` library:
`pip install auto-gptq`
- Install the latest `optimum` from source:
`pip install git+https://github.com/huggingface/optimum.git`
- Install the latest `transformers` from source:
`pip install git+https://github.com/huggingface/transformers.git`
- Install the latest `accelerate` library:
`pip install --upgrade accelerate`
Note that the GPTQ integration only supports text models for now, so you may encounter unexpected behaviour with vision, speech or multi-modal models.
### Load and quantize a model
GPTQ is a quantization method that requires weight calibration before using the quantized model. If you want to quantize a transformers model from scratch, it can take some time to produce the quantized model (about 5 minutes on a Google Colab for the `facebook/opt-350m` model).
Hence, there are two scenarios for using a GPTQ-quantized model: the first is to load a model that has already been quantized by another user on the Hub; the second is to quantize your model from scratch and save it or push it to the Hub so that other users can use it as well.
#### GPTQ Configuration
In order to load and quantize a model, you need to create a [`GPTQConfig`]. You need to pass the number of `bits`, a `dataset` used to calibrate the quantization, and the model's `tokenizer` to prepare the dataset.
```python
model_id = "facebook/opt-125m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
gptq_config = GPTQConfig(bits=4, dataset = "c4", tokenizer=tokenizer)
```
Note that you can pass your own dataset as a list of strings. However, it is highly recommended to use the dataset from the GPTQ paper.
```python
dataset = ["auto-gptq is an easy-to-use model quantization library with user-friendly apis
| |
180531
|
ce/optimum-benchmark) library to run a few speed, throughput and latency benchmarks.
Note that at the time of writing this documentation section, the available quantization methods were: `awq`, `gptq` and `bitsandbytes`.
The benchmark was run on an NVIDIA A100 instance, using [`TheBloke/Mistral-7B-v0.1-AWQ`](https://huggingface.co/TheBloke/Mistral-7B-v0.1-AWQ) as the AWQ model and [`TheBloke/Mistral-7B-v0.1-GPTQ`](https://huggingface.co/TheBloke/Mistral-7B-v0.1-GPTQ) as the GPTQ model. We also compared them against a `bitsandbytes`-quantized model and a `float16` model. Here are some example results:
<div style="text-align: center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/forward_memory_plot.png">
</div>
<div style="text-align: center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/generate_memory_plot.png">
</div>
<div style="text-align: center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/generate_throughput_plot.png">
</div>
<div style="text-align: center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/forward_latency_plot.png">
</div>
You can find the full results, along with the package versions, at [this link](https://github.com/huggingface/optimum-benchmark/tree/main/examples/running-mistrals).
From the results, AWQ is the fastest quantization method for inference and text generation, and it has the lowest peak memory for text generation. However, AWQ seems to have the largest forward latency per batch.
### Google colab demo
Check out how to use this integration in this [Google Colab demo](https://colab.research.google.com/drive/1HzZH89yAXJaZgwJDhQj9LqSBux932BvY)!
### AwqConfig
[[autodoc]] AwqConfig
## `AutoGPTQ` Integration
🤗 Transformers has integrated the `optimum` API to perform GPTQ quantization on language models. You can load and quantize your models in 8, 4, 3 or even 2 bits without a big drop in performance and with faster inference speed! This is supported by most GPU hardware.
To learn more about quantized models, check out:
- the [GPTQ](https://arxiv.org/pdf/2210.17323.pdf) paper
- the `optimum` [guide](https://huggingface.co/docs/optimum/llm_quantization/usage_guides/quantization) on GPTQ quantization
- the [`AutoGPTQ`](https://github.com/PanQiWei/AutoGPTQ) library used as the backend
### Requirements
You need to have the following requirements installed to run the code below:
- Install the latest version of the `AutoGPTQ` library:
`pip install auto-gptq`
- Install the latest version of `optimum` from source:
`pip install git+https://github.com/huggingface/optimum.git`
- Install the latest version of `transformers` from source:
`pip install git+https://github.com/huggingface/transformers.git`
- Install the latest version of the `accelerate` library:
`pip install --upgrade accelerate`
Note that the GPTQ integration only supports text models for now, so you may encounter unexpected behaviour with vision, speech or multi-modal models.
### Load and quantize a model
GPTQ is a quantization method that requires weight calibration before using the quantized model. If you want to quantize a transformers model from scratch, it can take some time to produce the quantized model (about 5 minutes on a Google Colab for the `facebook/opt-350m` model).
Hence, there are two scenarios for using a GPTQ-quantized model: the first is to load a model that has already been quantized by another user on the Hub; the second is to quantize your model from scratch and save it or push it to the Hub so that other users can use it as well.
#### GPTQ Configuration
In order to load and quantize a model, you need to create a [`GPTQConfig`]. You need to pass the number of `bits`, a `dataset` used to calibrate the quantization, and the model's `tokenizer` to prepare the dataset.
```python
model_id = "facebook/opt-125m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
gptq_config = GPTQConfig(bits=4, dataset = "c4", tokenizer=tokenizer)
```
Note that you can pass your own dataset as a list of strings. However, it is highly recommended to use the dataset from the GPTQ paper.
```python
dataset = ["auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."]
quantization = GPTQConfig(bits=4, dataset = dataset, tokenizer=tokenizer)
```
#### Quantization
You can quantize a model by using `from_pretrained` and setting the `quantization_config`.
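As a minimal sketch (reusing the `model_id` and `gptq_config` defined above), the quantization happens while the model is being loaded:
```python
from transformers import AutoModelForCausalLM

# Passing quantization_config triggers calibration and quantization during loading
quantized_model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", quantization_config=gptq_config
)
```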
| |
181066
|
TQ [[gptq]]
<Tip>
Try GPTQ quantization with PEFT in this [notebook](https://colab.research.google.com/drive/1_TIrmuKOFhuRRiTWN94iLKUFu6ZX4ceb) and learn more about its details in this [blog post](https://huggingface.co/blog/gptq-integration)!
</Tip>
The [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) library implements the GPTQ algorithm, a post-training quantization technique in which each row of the weight matrix is quantized independently to find the version of the weights that minimizes error. The weights are quantized to int4 but are restored to fp16 on the fly during inference. This saves about 4x in memory because the int4 weights are dequantized in a fused kernel rather than in the GPU's global memory, and you can also expect a speedup in inference because the lower bit width takes less time to communicate.
Before you begin, make sure the following libraries are installed:
```bash
pip install auto-gptq
pip install --upgrade accelerate optimum transformers
```
To quantize a model (currently only text models are supported), you need to create a [`GPTQConfig`] class and set the number of bits to quantize to, a dataset for calibrating the weights for quantization, and a tokenizer to prepare the dataset.
```py
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig
model_id = "facebook/opt-125m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
```
You could also pass your own dataset as a list of strings, but it is highly recommended to use the same dataset from the GPTQ paper.
```py
dataset = ["auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."]
gptq_config = GPTQConfig(bits=4, dataset=dataset, tokenizer=tokenizer)
```
Load a model to quantize and pass the `gptq_config` to the [`~AutoModelForCausalLM.from_pretrained`] method. Set `device_map="auto"` to automatically offload the model to the CPU to help fit the model in memory, and to allow the model modules to be moved between the CPU and GPU for quantization.
```py
quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=gptq_config)
```
Disk offloading, for when the dataset is too large and you run out of memory, is not currently supported. In that case, try the `max_memory` parameter to allocate the amount of memory to use on your devices (GPU and CPU):
```py
quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", max_memory={0: "30GiB", 1: "46GiB", "cpu": "30GiB"}, quantization_config=gptq_config)
```
<Tip warning={true}>
Depending on your hardware and the number of model parameters, quantizing a model from scratch can take very different amounts of time. For example, it takes about 5 minutes to quantize the relatively lightweight [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) model on a free-tier Google Colab GPU, but around 4 hours to quantize a model with 175B parameters on an NVIDIA A100. Before you quantize a model, it is a good idea to check whether a GPTQ-quantized version of it already exists on the Hub.
</Tip>
Once the model is quantized, you can push the model and tokenizer to the Hub where they can be easily shared and accessed. Use the [`~PreTrainedModel.push_to_hub`] method to save the [`GPTQConfig`]:
```py
quantized_model.push_to_hub("opt-125m-gptq")
tokenizer.push_to_hub("opt-125m-gptq")
```
You can also save the quantized model locally with the [`~PreTrainedModel.save_pretrained`] method. If the model was quantized with the `device_map` parameter, make sure to move the entire model to a GPU or CPU before saving it. For example, to save the model on the CPU:
```py
quantized_model.save_pretrained("opt-125m-gptq")
tokenizer.save_pretrained("opt-125m-gptq")
# if the model was quantized with device_map set
quantized_model.to("cpu")
quantized_model.save_pretrained("opt-125m-gptq")
```
To reload a quantized model, use the [`~PreTrainedModel.from_pretrained`] method and set `device_map="auto"` to automatically distribute the model across all available GPUs, loading it faster without using more memory than needed.
```py
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto")
```
## ExLlama [[exllama]]
[ExLlama](https://github.com/turboderp/exllama) is a Python/C++/CUDA implementation of the [Llama](model_doc/llama) model designed for faster inference with 4-bit GPTQ weights (check out these [benchmarks](https://github.com/huggingface/optimum/tree/main/tests/benchmark#gptq-benchmark)). The ExLlama kernel is activated by default when you create a [`GPTQConfig`] object. To boost inference speed even further, configure the `exllama_config` parameter to use the [ExLlamaV2](https://github.com/turboderp/exllamav2) kernels:
```py
import torch
from transformers import AutoModelForCausalLM, GPTQConfig
gptq_config = GPTQConfig(bits=4, exllama_config={"version":2})
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config=
| |
181258
|
hoosing a chat model
There are an enormous number of different chat models available on the [Hugging Face Hub](https://huggingface.co/models?pipeline_tag=text-generation&sort=trending),
and new users often feel very overwhelmed by the selection offered. Don't be, though! You really need to just focus on
two important considerations:
- The model's size, which will determine if you can fit it in memory and how quickly it will
run.
- The quality of the model's chat output.
In general, these are correlated - bigger models tend to be
more capable, but even so there's a lot of variation at a given size point!
### Size and model naming
The size of a model is easy to spot - it's the number in the model name, like "8B" or "70B". This is the number of
**parameters** in the model. Without quantization, you should expect to need about 2 bytes of memory per parameter.
This means that an "8B" model with 8 billion parameters will need about 16GB of memory just to fit the parameters,
plus a little extra for other overhead. It's a good fit for a high-end consumer GPU with 24GB of memory, such as a 3090
or 4090.
Some chat models are "Mixture of Experts" models. These may list their sizes in different ways, such as "8x7B" or
"141B-A35B". The numbers are a little fuzzier here, but in general you can read this as saying that the model
has approximately 56 (8x7) billion parameters in the first case, or 141 billion parameters in the second case.
Note that it is very common to use quantization techniques to reduce the memory usage per parameter to 8 bits, 4 bits,
or even less. This topic is discussed in more detail in the [Memory considerations](#memory-considerations) section below.
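As a rough back-of-the-envelope sketch (just the arithmetic described above, not a library API; real memory use also includes activations, the KV cache and other overhead):
```python
def estimate_param_memory_gb(num_params_billion: float, bits_per_param: int) -> float:
    """Rough estimate of the memory needed just for the parameters, in GB."""
    return num_params_billion * 1e9 * bits_per_param / 8 / 1e9


print(estimate_param_memory_gb(8, 16))  # ~16 GB for an "8B" model in 16-bit precision
print(estimate_param_memory_gb(8, 4))   # ~4 GB for the same model quantized to 4 bits
```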
### But which chat model is best?
Even once you know the size of chat model you can run, there's still a lot of choice out there. One way to sift through
it all is to consult **leaderboards**. Two of the most popular leaderboards are the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
and the [LMSys Chatbot Arena Leaderboard](https://chat.lmsys.org/?leaderboard). Note that the LMSys leaderboard
also includes proprietary models - look at the `licence` column to identify open-source ones that you can download, then
search for them on the [Hugging Face Hub](https://huggingface.co/models?pipeline_tag=text-generation&sort=trending).
### Specialist domains
Some models may be specialized for certain domains, such as medical or legal text, or non-English languages.
If you're working in these domains, you may find that a specialized model will give you big performance benefits.
Don't automatically assume that, though! Particularly when specialized models are smaller or older than the current
cutting-edge, a top-end general-purpose model may still outclass them. Thankfully, we are beginning to see
[domain-specific leaderboards](https://huggingface.co/blog/leaderboard-medicalllm) that should make it easier to locate
the best models for specialized domains.
## What happens inside the pipeline?
The quickstart above used a high-level pipeline to chat with a chat model, which is convenient, but not the
most flexible. Let's take a more low-level approach, to see each of the steps involved in chat. Let's start with
a code sample, and then break it down:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Prepare the input as before
chat = [
{"role": "system", "content": "You are a sassy, wise-cracking robot as imagined by Hollywood circa 1986."},
{"role": "user", "content": "Hey, can you tell me any fun things to do in New York?"}
]
# 1: Load the model and tokenizer
model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto", torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
# 2: Apply the chat template
formatted_chat = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
print("Formatted chat:\n", formatted_chat)
# 3: Tokenize the chat (This can be combined with the previous step using tokenize=True)
inputs = tokenizer(formatted_chat, return_tensors="pt", add_special_tokens=False)
# Move the tokenized inputs to the same device the model is on (GPU/CPU)
inputs = {key: tensor.to(model.device) for key, tensor in inputs.items()}
print("Tokenized inputs:\n", inputs)
# 4: Generate text from the model
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.1)
print("Generated tokens:\n", outputs)
# 5: Decode the output back to a string
decoded_output = tokenizer.decode(outputs[0][inputs['input_ids'].size(1):], skip_special_tokens=True)
print("Decoded output:\n", decoded_output)
```
There's a lot in here, each piece of which could be its own document! Rather than going into too much detail, I'll cover
the broad ideas, and leave the details for the linked documents. The key steps are:
1. [Models](https://huggingface.co/learn/nlp-course/en/chapter2/3) and [Tokenizers](https://huggingface.co/learn/nlp-course/en/chapter2/4?fw=pt) are loaded from the Hugging Face Hub.
2. The chat is formatted using the tokenizer's [chat template](https://huggingface.co/docs/transformers/main/en/chat_templating)
3. The formatted chat is [tokenized](https://huggingface.co/learn/nlp-course/en/chapter2/4) using the tokenizer.
4. We [generate](https://huggingface.co/docs/transformers/en/llm_tutorial) a response from the model.
5. The tokens output by the model are decoded back to a string
## P
| |
181285
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Generation with LLMs
[[open-in-colab]]
LLMs, or Large Language Models, are the key component behind text generation. In a nutshell, they consist of large pretrained transformer models trained to predict the next word (or, more precisely, token) given some input text. Since they predict one token at a time, you need to do something more elaborate to generate new sentences other than just calling the model -- you need to do autoregressive generation.
Autoregressive generation is the inference-time procedure of iteratively calling a model with its own generated outputs, given a few initial inputs. In 🤗 Transformers, this is handled by the [`~generation.GenerationMixin.generate`] method, which is available to all models with generative capabilities.
This tutorial will show you how to:
* Generate text with an LLM
* Avoid common pitfalls
* Next steps to help you get the most out of your LLM
Before you begin, make sure you have all the necessary libraries installed:
```bash
pip install transformers bitsandbytes>=0.39.0 -q
```
## Generate text
A language model trained for [causal language modeling](tasks/language_modeling) takes a sequence of text tokens as input and returns the probability distribution for the next token.
<!-- [GIF 1 -- FWD PASS] -->
<figure class="image table text-center m-0 w-full">
<video
style="max-width: 90%; margin: auto;"
autoplay loop muted playsinline
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_1_1080p.mov"
></video>
<figcaption>"Forward pass of an LLM"</figcaption>
</figure>
A critical aspect of autoregressive generation with LLMs is how to select the next token from this probability distribution. Anything goes in this step as long as you end up with a token for the next iteration. This means it can be as simple as selecting the most likely token from the probability distribution or as complex as applying a dozen transformations before sampling from the resulting distribution.
<!-- [GIF 2 -- TEXT GENERATION] -->
<figure class="image table text-center m-0 w-full">
<video
style="max-width: 90%; margin: auto;"
autoplay loop muted playsinline
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_2_1080p.mov"
></video>
<figcaption>"Autoregressive generation iteratively selects the next token from a probability distribution to generate text"</figcaption>
</figure>
The process depicted above is repeated iteratively until some stopping condition is reached. Ideally, the stopping condition is dictated by the model, which should learn when to output an end-of-sequence (`EOS`) token. If this is not the case, generation stops when some predefined maximum length is reached.
Properly setting up the token selection step and the stopping condition is essential to make your model behave as you'd expect on your task. That is why we have a [`~generation.GenerationConfig`] file associated with each model, which contains a good default generative parameterization and is loaded alongside your model.
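For instance, you can peek at that default parameterization directly. This is a small aside using the standard `GenerationConfig.from_pretrained` loader, shown here with the same checkpoint used later in this guide:
```py
from transformers import GenerationConfig

# Load the default generation parameterization stored with a checkpoint
generation_config = GenerationConfig.from_pretrained("mistralai/Mistral-7B-v0.1")
print(generation_config)  # shows defaults such as eos_token_id and max_length
```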
Let's talk code!
<Tip>
If you're interested in basic LLM usage, our high-level [`Pipeline`](pipeline_tutorial) interface is a great starting point. However, LLMs often require advanced features like quantization and fine control of the token selection step, which is best done through [`~generation.GenerationMixin.generate`]. Autoregressive generation with LLMs is also resource-intensive and should be executed on a GPU for adequate throughput.
</Tip>
First, you need to load the model.
```py
>>> from transformers import AutoModelForCausalLM
>>> model = AutoModelForCausalLM.from_pretrained(
... "mistralai/Mistral-7B-v0.1", device_map="auto", load_in_4bit=True
... )
```
You'll notice two flags in the `from_pretrained` call:
- `device_map` ensures the model is moved to your GPU(s)
- `load_in_4bit` applies [4-bit dynamic quantization](main_classes/quantization) to massively reduce the resource requirements
There are other ways to initialize a model, but this is a good baseline to begin with an LLM.
Next, you need to preprocess your text input with a [tokenizer](tokenizer_summary).
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", padding_side="left")
>>> model_inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to("cuda")
```
The `model_inputs` variable holds the tokenized text input, as well as the attention mask. While [`~generation.GenerationMixin.generate`] does its best effort to infer the attention mask when it is not passed, we recommend passing it whenever possible for optimal results.
After tokenizing the inputs, you can call the [`~generation.GenerationMixin.generate`] method, which returns the generated tokens. The generated tokens should then be converted to text before printing.
```py
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A list of colors: red, blue, green, yellow, orange, purple, pink,'
```
Finally, you don't need to do it one sequence at a time! You can batch your inputs, which will greatly improve the throughput at a small latency and memory cost. All you need to do is to make sure you pad your inputs properly (more on that below).
```py
>>> tokenizer.pad_token = tokenizer.eos_token # Most LLMs don't have a pad token by default
>>> model_inputs = tokenizer(
... ["A list of colors: red, blue", "Portugal is"], return_tensors="pt", padding=True
... ).to("cuda")
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
['A list of colors: red, blue, green, yellow, orange, purple, pink,',
'Portugal is a country in southwestern Europe, on the Iber']
```
And that's it! In a few lines of code, you can harness the power of an LLM.
## Comm
| |
181339
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Chat Templates
## Introduction
An increasingly common use case for LLMs is **chat**. In a chat context, rather than continuing a single string
of text (as is the case with a standard language model), the model instead continues a conversation that consists
of one or more **messages**, each of which includes a **role**, like "user" or "assistant", as well as message text.
Much like tokenization, different models expect very different input formats for chat. This is the reason we added
**chat templates** as a feature. Chat templates are part of the tokenizer. They specify how to convert conversations,
represented as lists of messages, into a single tokenizable string in the format that the model expects.
Let's make this concrete with a quick example using the `mistralai/Mistral-7B-Instruct-v0.1` model:
```python
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
>>> chat = [
... {"role": "user", "content": "Hello, how are you?"},
... {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
... {"role": "user", "content": "I'd like to show off how chat templating works!"},
... ]
>>> tokenizer.apply_chat_template(chat, tokenize=False)
"<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]"
```
Notice how the tokenizer has added the control tokens [INST] and [/INST] to indicate the start and end of
user messages (but not assistant messages!), and the entire chat is condensed into a single string.
If we use `tokenize=True`, which is the default setting, that string will also be tokenized for us.
Now, try the same code, but swap in the `HuggingFaceH4/zephyr-7b-beta` model instead, and you should get:
```text
<|user|>
Hello, how are you?</s>
<|assistant|>
I'm doing great. How can I help you today?</s>
<|user|>
I'd like to show off how chat templating works!</s>
```
Both Zephyr and Mistral-Instruct were fine-tuned from the same base model, `Mistral-7B-v0.1`. However, they were trained
with totally different chat formats. Without chat templates, you would have to write manual formatting code for each
model, and it's very easy to make minor errors that hurt performance! Chat templates handle the details of formatting
for you, allowing you to write universal code that works for any model.
## How do I use chat templates?
As you can see in the example above, chat templates are easy to use. Simply build a list of messages, with `role`
and `content` keys, and then pass it to the [`~PreTrainedTokenizer.apply_chat_template`] method. Once you do that,
you'll get output that's ready to go! When using chat templates as input for model generation, it's also a good idea
to use `add_generation_prompt=True` to add a [generation prompt](#what-are-generation-prompts).
Here's an example of preparing input for `model.generate()`, using `Zephyr` again:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
checkpoint = "HuggingFaceH4/zephyr-7b-beta"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint) # You may want to use bfloat16 and/or move to GPU here
messages = [
{
"role": "system",
"content": "You are a friendly chatbot who always responds in the style of a pirate",
},
{"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
print(tokenizer.decode(tokenized_chat[0]))
```
This will yield a string in the input format that Zephyr expects.
```text
<|system|>
You are a friendly chatbot who always responds in the style of a pirate</s>
<|user|>
How many helicopters can a human eat in one sitting?</s>
<|assistant|>
```
Now that our input is formatted correctly for Zephyr, we can use the model to generate a response to the user's question:
```python
outputs = model.generate(tokenized_chat, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```
This will yield:
```text
<|system|>
You are a friendly chatbot who always responds in the style of a pirate</s>
<|user|>
How many helicopters can a human eat in one sitting?</s>
<|assistant|>
Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all.
```
Arr, 'twas easy after all!
## Is there an automated pipeline for chat?
Yes, there is! Our text generation pipelines support chat inputs, which makes it easy to use chat models. In the past,
we used to use a dedicated "ConversationalPipeline" class, but this has now been deprecated and its functionality
has been merged into the [`TextGenerationPipeline`]. Let's try the `Zephyr` example again, but this time using
a pipeline:
```python
from transformers import pipeline
pipe = pipeline("text-generation", "HuggingFaceH4/zephyr-7b-beta")
messages = [
{
"role": "system",
"content": "You are a friendly chatbot who always responds in the style of a pirate",
},
{"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
print(pipe(messages, max_new_tokens=128)[0]['generated_text'][-1]) # Print the assistant's response
```
```text
{'role': 'assistant', 'content': "Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all."}
```
The pipeline will take care of all the details of tokenization and calling `apply_chat_template` for you -
once the model has a chat template, all you need to do is initialize the pipeline and pass it the list of messages!
## W
| |
181343
|
vanced: How do chat templates work?
The chat template for a model is stored on the `tokenizer.chat_template` attribute. If no chat template is set, the
default template for that model class is used instead. Let's take a look at a `Zephyr` chat template, though note this
one is a little simplified from the actual one!
```
{%- for message in messages %}
{{- '<|' + message['role'] + '|>\n' }}
{{- message['content'] + eos_token }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|assistant|>\n' }}
{%- endif %}
```
If you've never seen one of these before, this is a [Jinja template](https://jinja.palletsprojects.com/en/3.1.x/templates/).
Jinja is a templating language that allows you to write simple code that generates text. In many ways, the code and
syntax resembles Python. In pure Python, this template would look something like this:
```python
for message in messages:
print(f'<|{message["role"]}|>')
print(message['content'] + eos_token)
if add_generation_prompt:
print('<|assistant|>')
```
Effectively, the template does three things:
1. For each message, print the role enclosed in `<|` and `|>`, like `<|user|>` or `<|assistant|>`.
2. Next, print the content of the message, followed by the end-of-sequence token.
3. Finally, if `add_generation_prompt` is set, print the assistant token, so that the model knows to start generating
an assistant response.
This is a pretty simple template but Jinja gives you a lot of flexibility to do more complex things! Let's see a Jinja
template that can format inputs similarly to the way LLaMA formats them (note that the real LLaMA template includes
handling for default system messages and slightly different system message handling in general - don't use this one
in your actual code!)
```
{%- for message in messages %}
{%- if message['role'] == 'user' %}
{{- bos_token + '[INST] ' + message['content'] + ' [/INST]' }}
{%- elif message['role'] == 'system' %}
{{- '<<SYS>>\\n' + message['content'] + '\\n<</SYS>>\\n\\n' }}
{%- elif message['role'] == 'assistant' %}
{{- ' ' + message['content'] + ' ' + eos_token }}
{%- endif %}
{%- endfor %}
```
Hopefully if you stare at this for a little bit you can see what this template is doing - it adds specific tokens like
`[INST]` and `[/INST]` based on the role of each message. User, assistant and system messages are clearly
distinguishable to the model because of the tokens they're wrapped in.
## Ad
| |
181344
|
vanced: Adding and editing chat templates
### How do I create a chat template?
Simple: just write a Jinja template and set `tokenizer.chat_template`. You may find it easier to start with an
existing template from another model and simply edit it for your needs! For example, we could take the LLaMA template
above and add "[ASST]" and "[/ASST]" to assistant messages:
```
{%- for message in messages %}
{%- if message['role'] == 'user' %}
{{- bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }}
{%- elif message['role'] == 'system' %}
{{- '<<SYS>>\\n' + message['content'].strip() + '\\n<</SYS>>\\n\\n' }}
{%- elif message['role'] == 'assistant' %}
{{- '[ASST] ' + message['content'] + ' [/ASST]' + eos_token }}
{%- endif %}
{%- endfor %}
```
Now, simply set the `tokenizer.chat_template` attribute. Next time you use [`~PreTrainedTokenizer.apply_chat_template`], it will
use your new template! This attribute will be saved in the `tokenizer_config.json` file, so you can use
[`~utils.PushToHubMixin.push_to_hub`] to upload your new template to the Hub and make sure everyone's using the right
template for your model!
```python
template = tokenizer.chat_template
template = template.replace("SYS", "SYSTEM") # Change the system token
tokenizer.chat_template = template # Set the new template
tokenizer.push_to_hub("model_name") # Upload your new template to the Hub!
```
The method [`~PreTrainedTokenizer.apply_chat_template`] which uses your chat template is called by the [`TextGenerationPipeline`] class, so
once you set the correct chat template, your model will automatically become compatible with [`TextGenerationPipeline`].
<Tip>
If you're fine-tuning a model for chat, in addition to setting a chat template, you should probably add any new chat
control tokens as special tokens in the tokenizer. Special tokens are never split,
ensuring that your control tokens are always handled as single tokens rather than being tokenized in pieces. You
should also set the tokenizer's `eos_token` attribute to the token that marks the end of assistant generations in your
template. This will ensure that text generation tools can correctly figure out when to stop generating text.
</Tip>
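A minimal sketch of what that can look like (the `<|im_start|>`/`<|im_end|>` names here are just the ChatML-style tokens used later in this document; substitute whatever control tokens your own template uses, and only resize the embeddings if you also have the model loaded):
```python
# Register the chat control tokens so they are never split during tokenization
tokenizer.add_special_tokens({"additional_special_tokens": ["<|im_start|>", "<|im_end|>"]})

# Mark the token that ends assistant turns so generation knows when to stop
tokenizer.eos_token = "<|im_end|>"

# If the tokens are new to the vocabulary, the model embeddings need resizing
model.resize_token_embeddings(len(tokenizer))
```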
### Why do some models have multiple templates?
Some models use different templates for different use cases. For example, they might use one template for normal chat
and another for tool-use, or retrieval-augmented generation. In these cases, `tokenizer.chat_template` is a dictionary.
This can cause some confusion, and where possible, we recommend using a single template for all use-cases. You can use
Jinja statements like `if tools is defined` and `{% macro %}` definitions to easily wrap multiple code paths in a
single template.
When a tokenizer has multiple templates, `tokenizer.chat_template` will be a `dict`, where each key is the name
of a template. The `apply_chat_template` method has special handling for certain template names: Specifically, it will
look for a template named `default` in most cases, and will raise an error if it can't find one. However, if a template
named `tool_use` exists when the user has passed a `tools` argument, it will use that instead. To access templates
with other names, pass the name of the template you want to the `chat_template` argument of
`apply_chat_template()`.
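For example (a sketch; `messages` is a chat like the ones above, and `tools` is assumed to be a list of tool definitions you have already built):
```python
# Explicitly select a named template instead of relying on the default lookup
formatted_chat = tokenizer.apply_chat_template(
    messages,
    chat_template="tool_use",  # name of the template to use
    tools=tools,
    tokenize=False,
    add_generation_prompt=True,
)
```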
We find that this can be a bit confusing for users, though - so if you're writing a template yourself, we recommend
trying to put it all in a single template where possible!
### What template should I use?
When setting the template for a model that's already been trained for chat, you should ensure that the template
exactly matches the message formatting that the model saw during training, or else you will probably experience
performance degradation. This is true even if you're training the model further - you will probably get the best
performance if you keep the chat tokens constant. This is very analogous to tokenization - you generally get the
best performance for inference or fine-tuning when you precisely match the tokenization used during training.
If you're training a model from scratch, or fine-tuning a base language model for chat, on the other hand,
you have a lot of freedom to choose an appropriate template! LLMs are smart enough to learn to handle lots of different
input formats. One popular choice is the `ChatML` format, and this is a good, flexible choice for many use-cases.
It looks like this:
```
{%- for message in messages %}
{{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}
{%- endfor %}
```
If you like this one, here it is in one-liner form, ready to copy into your code. The one-liner also includes
handy support for [generation prompts](#what-are-generation-prompts), but note that it doesn't add BOS or EOS tokens!
If your model expects those, they won't be added automatically by `apply_chat_template` - in other words, the
text will be tokenized with `add_special_tokens=False`. This is to avoid potential conflicts between the template and
the `add_special_tokens` logic. If your model expects special tokens, make sure to add them to the template!
```python
tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
```
This template wraps each message in `<|im_start|>` and `<|im_end|>` tokens, and simply writes the role as a string, which
allows for flexibility in the roles you train with. The output looks like this:
```text
<|im_start|>system
You are a helpful chatbot that will do its best not to say anything so stupid that people tweet about it.<|im_end|>
<|im_start|>user
How are you?<|im_end|>
<|im_start|>assistant
I'm doing great!<|im_end|>
```
The "user", "system" and "assistant" roles are the standard for chat, and we recommend using them when it makes sense,
particularly if you want your model to operate well with [`TextGenerationPipeline`]. However, you are not limited
to these roles - templating is extremely flexible, and any string can be a role.
### I want to add some chat templates! How should I get started?
If you have any chat models, you should set their `tokenizer.chat_template` attribute and test it using
[`~PreTrainedTokenizer.apply_chat_template`], then push the updated tokenizer to the Hub. This applies even if you're
not the model owner - if you're using a model with an empty chat template, or one that's still using the default class
template, please open a [pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) to the model repository so that this attribute can be set properly!
Once the attribute is set, that's it, you're done! `tokenizer.apply_chat_template` will now work correctly for that
model, which means it is also automatically supported in places like `TextGenerationPipeline`!
By ensuring that models have this attribute, we can make sure that the whole community gets to use the full power of
open-source models. Formatting mismatches have been haunting the field and silently harming performance for too long -
it's time to put an end to them!
## Ad
| |
181355
|
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Using pipelines for a webserver
<Tip>
Creating an inference engine is a complex topic, and the "best" solution
will most likely depend on your problem space. Are you on CPU or GPU? Do
you want the lowest latency, the highest throughput, support for
many models, or just to highly optimize one specific model?
There are many ways to tackle this topic, so what we are going to present is a good default
to get started which may not necessarily be the most optimal solution for you.
</Tip>
The key thing to understand is that we can use an iterator, just like you would [on a
dataset](pipeline_tutorial#using-pipelines-on-a-dataset), since a webserver is basically a system that waits for requests and
treats them as they come in.
Usually webservers are multiplexed (multithreaded, async, etc..) to handle various
requests concurrently. Pipelines on the other hand (and mostly the underlying models)
are not really great for parallelism; they take up a lot of RAM, so it's best to give them all the available resources when they are running, since inference is a compute-intensive job.
We are going to solve that by having the webserver handle the light load of receiving
and sending requests, and having a single thread handling the actual work.
This example is going to use `starlette`. The actual framework is not really
important, but you might have to tune or change the code if you are using another
one to achieve the same effect.
Create `server.py`:
```py
from starlette.applications import Starlette
from starlette.responses import JSONResponse
from starlette.routing import Route
from transformers import pipeline
import asyncio
async def homepage(request):
payload = await request.body()
string = payload.decode("utf-8")
response_q = asyncio.Queue()
await request.app.model_queue.put((string, response_q))
output = await response_q.get()
return JSONResponse(output)
async def server_loop(q):
pipe = pipeline(model="google-bert/bert-base-uncased")
while True:
(string, response_q) = await q.get()
out = pipe(string)
await response_q.put(out)
app = Starlette(
routes=[
Route("/", homepage, methods=["POST"]),
],
)
@app.on_event("startup")
async def startup_event():
q = asyncio.Queue()
app.model_queue = q
asyncio.create_task(server_loop(q))
```
Now you can start it with:
```bash
uvicorn server:app
```
And you can query it:
```bash
curl -X POST -d "test [MASK]" http://localhost:8000/
#[{"score":0.7742936015129089,"token":1012,"token_str":".","sequence":"test."},...]
```
And there you go, now you have a good idea of how to create a webserver!
What is really important is that we load the model only **once**, so there are no copies
of the model on the webserver. This way, no unnecessary RAM is being used.
Then the queuing mechanism allows you to do fancy stuff like maybe accumulating a few
items before inferring to use dynamic batching:
<Tip warning={true}>
The code sample below is intentionally written like pseudo-code for readability.
Do not run this without checking if it makes sense for your system resources!
</Tip>
```py
# block until the first request arrives, then keep accumulating for up to 1ms per extra fetch
(string, rq) = await q.get()
strings = [string]
queues = [rq]
while True:
    try:
        (string, rq) = await asyncio.wait_for(q.get(), timeout=0.001)  # 1ms
    except asyncio.exceptions.TimeoutError:
        break
    strings.append(string)
    queues.append(rq)
outs = pipe(strings, batch_size=len(strings))
for rq, out in zip(queues, outs):
    await rq.put(out)
```
Again, the proposed code is optimized for readability, not for being the best code.
First of all, there's no batch size limit, which is usually not a
great idea. Next, the timeout is reset on every queue fetch, meaning you could
wait much more than 1ms before running the inference (delaying the first request
by that much).
It would be better to have a single 1ms deadline for the whole batch; a sketch of that idea follows below.
The deadline will still wait the full 1ms even if the queue is empty, which might not be the
best choice, since you probably want to start doing inference as soon as there's nothing left in the queue.
But maybe it does make sense if batching is really crucial for your use case.
Again, there's really no single best solution.
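Here is a minimal sketch of that single-deadline variant, written in the same pseudo-code spirit as above (the 1ms window is an arbitrary choice; `q` and `pipe` are the queue and pipeline from the server example):
```py
# sketch: one shared 1ms deadline for the whole batch instead of a per-fetch timeout
loop = asyncio.get_running_loop()
(string, rq) = await q.get()
strings = [string]
queues = [rq]
deadline = loop.time() + 0.001  # 1ms from the first request
while True:
    remaining = deadline - loop.time()
    if remaining <= 0:
        break
    try:
        (string, rq) = await asyncio.wait_for(q.get(), timeout=remaining)
    except asyncio.exceptions.TimeoutError:
        break
    strings.append(string)
    queues.append(rq)
outs = pipe(strings, batch_size=len(strings))
for rq, out in zip(queues, outs):
    await rq.put(out)
```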
## A few things you might want to consider
### Error checking
There's a lot that can go wrong in production: out of memory, out of space,
loading the model might fail, the query might be wrong, the query might be
correct but still fail to run because of a model misconfiguration, and so on.
Generally, it's good if the server surfaces errors to the user, so
adding a lot of `try..except` statements to return those errors is a good
idea. But keep in mind that revealing all of those errors may also be a security risk,
depending on your security context.
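As a minimal sketch, building on the `server_loop` from the example above (how much detail you return in the error message depends on your security context):
```py
# sketch: catch pipeline failures and report them back to the waiting request
async def server_loop(q):
    pipe = pipeline(model="google-bert/bert-base-uncased")
    while True:
        (string, response_q) = await q.get()
        try:
            out = pipe(string)
        except Exception as e:
            # decide carefully how much of the error you are willing to expose
            out = {"error": str(e)}
        await response_q.put(out)
```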
### Circuit breaking
Webservers usually behave much better under load when they implement circuit breaking: instead of
holding on to queries indefinitely while overloaded, they return a proper error right away. Return a 503 immediately rather than making clients wait a very long time, or eventually time out with a 504.
This is relatively easy to implement in the proposed code since there is a single queue.
Looking at the queue size is a basic way to start returning errors before your
webserver fails under load.
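A minimal sketch of that idea, reusing the `homepage` handler from the example above (the threshold of 32 queued requests is an arbitrary assumption; tune it for your hardware):
```py
# sketch: reject new requests with a 503 once the model queue grows too long
QUEUE_LIMIT = 32  # arbitrary threshold


async def homepage(request):
    if request.app.model_queue.qsize() > QUEUE_LIMIT:
        return JSONResponse({"error": "Server overloaded, please retry later."}, status_code=503)
    payload = await request.body()
    string = payload.decode("utf-8")
    response_q = asyncio.Queue()
    await request.app.model_queue.put((string, response_q))
    output = await response_q.get()
    return JSONResponse(output)
```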
### Blocking the main thread
Currently PyTorch is not async aware, and computation will block the main
thread while running. That means it would be better if PyTorch were forced to run
on its own thread/process. This wasn't done here because the code is a lot more
complex (mostly because threads, async, and queues don't play nicely together),
but ultimately it achieves the same thing.
This would be important if inference on a single item is long (> 1s), because
in that case every query arriving during inference would have to wait that long before
even receiving an error.
### Dynamic batching
In general, batching is not necessarily an improvement over passing 1 item at
a time (see [batching details](./main_classes/pipelines#pipeline-batching) for more information). But it can be very effective
when used in the correct setting. In the API, there is no dynamic
batching by default (too much opportunity for a slowdown). But for BLOOM inference -
which is a very large model - dynamic batching is **essential** to provide a decent experience for everyone.
| |
181385
|
## Parameters
[`pipeline`] supports many parameters; some are task specific, and some are general to all pipelines.
In general, you can specify parameters anywhere you want:
```py
transcriber = pipeline(model="openai/whisper-large-v2", my_parameter=1)
out = transcriber(...) # This will use `my_parameter=1`.
out = transcriber(..., my_parameter=2) # This will override and use `my_parameter=2`.
out = transcriber(...) # This will go back to using `my_parameter=1`.
```
Let's check out 3 important ones:
### Device
If you use `device=n`, the pipeline automatically puts the model on the specified device.
This works regardless of whether you are using PyTorch or TensorFlow.
```py
transcriber = pipeline(model="openai/whisper-large-v2", device=0)
```
If the model is too large for a single GPU and you are using PyTorch, you can set `torch_dtype='float16'` to enable FP16 precision inference. Usually this does not cause significant quality drops, but make sure you evaluate it on your models!
Alternatively, you can set `device_map="auto"` to automatically
determine how to load and store the model weights. Using the `device_map` argument requires the 🤗 [Accelerate](https://huggingface.co/docs/accelerate)
package:
```bash
pip install --upgrade accelerate
```
The following code automatically loads and stores model weights across devices:
```py
transcriber = pipeline(model="openai/whisper-large-v2", device_map="auto")
```
Note that if `device_map="auto"` is passed, there is no need to add a `device=device` argument when instantiating your `pipeline`; combining the two may lead to unexpected behavior!
### Batch size
By default, pipelines will not batch inference for reasons explained in detail [here](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-batching). The reason is that batching is not necessarily faster, and can actually be quite a bit slower in some cases.
But if it works in your use case, you can use:
```py
transcriber = pipeline(model="openai/whisper-large-v2", device=0, batch_size=2)
audio_filenames = [f"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/{i}.flac" for i in range(1, 5)]
texts = transcriber(audio_filenames)
```
This runs the pipeline on the 4 provided audio files, but it will pass them in batches of 2
to the model (which is on a GPU, where batching is more likely to help) without requiring any further code from you.
The output should always match what you would have received without batching. It is only meant as a way to help you get more speed out of a pipeline.
Pipelines can also alleviate some of the complexities of batching because, for some pipelines, a single item (like a long audio file) needs to be chunked into multiple parts to be processed by a model. The pipeline performs this [*chunk batching*](./main_classes/pipelines#pipeline-chunk-batching) for you.
### Task specific parameters
All tasks provide task specific parameters which allow for additional flexibility and options to help you get your job done.
For instance, the [`transformers.AutomaticSpeechRecognitionPipeline.__call__`] method has a `return_timestamps` parameter which sounds promising for subtitling videos:
```py
>>> transcriber = pipeline(model="openai/whisper-large-v2", return_timestamps=True)
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.', 'chunks': [{'timestamp': (0.0, 11.88), 'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its'}, {'timestamp': (11.88, 12.38), 'text': ' creed.'}]}
```
As you can see, the model inferred the text and also outputted **when** the various sentences were pronounced.
There are many parameters available for each task, so check out each task's API reference to see what you can tinker with!
For instance, the [`~transformers.AutomaticSpeechRecognitionPipeline`] has a `chunk_length_s` parameter which is helpful
for working on really long audio files (for example, subtitling entire movies or hour-long videos) that a model typically
cannot handle on its own:
```python
>>> transcriber = pipeline(model="openai/whisper-large-v2", chunk_length_s=30)
>>> transcriber("https://huggingface.co/datasets/reach-vb/random-audios/resolve/main/ted_60.wav")
{'text': " So in college, I was a government major, which means I had to write a lot of papers. Now, when a normal student writes a paper, they might spread the work out a little like this. So, you know. You get started maybe a little slowly, but you get enough done in the first week that with some heavier days later on, everything gets done and things stay civil. And I would want to do that like that. That would be the plan. I would have it all ready to go, but then actually the paper would come along, and then I would kind of do this. And that would happen every single paper. But then came my 90-page senior thesis, a paper you're supposed to spend a year on. I knew for a paper like that, my normal workflow was not an option, it was way too big a project. So I planned things out and I decided I kind of had to go something like this. This is how the year would go. So I'd start off light and I'd bump it up"}
```
If you can't find a parameter that would really help you out, feel free to [request it](https://github.com/huggingface/transformers/issues/new?assignees=&labels=feature&template=feature-request.yml)!
## Using pipelines on a dataset
The pipeline can also run inference on a large dataset. The easiest way we recommend doing this is by using an iterator:
```py
def data():
for i in range(1000):
yield f"My example {i}"
pipe = pipeline(model="openai-community/gpt2", device=0)
generated_characters = 0
for out in pipe(data()):
generated_characters += len(out[0]["generated_text"])
```
The iterator `data()` yields each input, and the pipeline automatically
recognizes the input is iterable and will start fetching the data while
it continues to process it on the GPU (this uses [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) under the hood).
This is important because you don't have to allocate memory for the whole dataset
and you can feed the GPU as fast as possible.
Since batching could speed things up, it may be useful to try tuning the `batch_size` parameter here.
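For example, a minimal sketch using the same iterator as above (the batch size of 8 is an arbitrary choice, not a recommendation):
```py
# sketch: same iterator, but letting the pipeline batch inputs on the GPU
for out in pipe(data(), batch_size=8):
    generated_characters += len(out[0]["generated_text"])
```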
The simplest way to iterate over a dataset is to just load one from 🤗 [Datasets](https://github.com/huggingface/datasets/):
```py
# KeyDataset is a util that will just output the item we're interested in.
from transformers.pipelines.pt_utils import KeyDataset
from datasets import load_dataset
pipe = pipeline(model="hf-internal-testing/tiny-random-wav2vec2", device=0)
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:10]")
for out in pipe(KeyDataset(dataset, "audio")):
print(out)
```
## Using pipelines for a webserver
<Tip>
Creating an inference engine is a complex topic which deserves its own page.
</Tip>
[Link](./pipeline_webserver)
## Vision
| |
181392
|
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# GGUF and interaction with Transformers
The GGUF file format is used to store models for inference with [GGML](https://github.com/ggerganov/ggml) and other
libraries that depend on it, like the very popular [llama.cpp](https://github.com/ggerganov/llama.cpp) or
[whisper.cpp](https://github.com/ggerganov/whisper.cpp).
It is a file format [supported by the Hugging Face Hub](https://huggingface.co/docs/hub/en/gguf) with features
allowing for quick inspection of tensors and metadata within the file.
This file format is designed as a "single-file format", where a single file usually contains the configuration
attributes, the tokenizer vocabulary, and other metadata, as well as all the tensors to be loaded into the model. These
files come in different formats according to the quantization type of the file. We briefly go over some of them
[here](https://huggingface.co/docs/hub/en/gguf#quantization-types).
## Support within Transformers
We have added the ability to load `gguf` files within `transformers` in order to offer further training/fine-tuning
capabilities for gguf models, before converting those models back to `gguf` for use within the `ggml` ecosystem. When
loading a model, we first dequantize it to fp32 before loading the weights to be used in PyTorch.
> [!NOTE]
> The support is still very exploratory and we welcome contributions in order to solidify it across quantization types
> and model architectures.
For now, here are the supported model architectures and quantization types:
### Supported quantization types
The initial supported quantization types are decided according to the popular quantized files that have been shared
on the Hub.
- F32
- F16
- BF16
- Q4_0
- Q4_1
- Q5_0
- Q5_1
- Q8_0
- Q2_K
- Q3_K
- Q4_K
- Q5_K
- Q6_K
- IQ1_S
- IQ1_M
- IQ2_XXS
- IQ2_XS
- IQ2_S
- IQ3_XXS
- IQ3_S
- IQ4_XS
- IQ4_NL
> [!NOTE]
> To support gguf dequantization, `gguf>=0.10.0` installation is required.
### Supported model architectures
For now the supported model architectures are the architectures that have been very popular on the Hub, namely:
- LLaMa
- Mistral
- Qwen2
- Qwen2Moe
- Phi3
- Bloom
- Falcon
## Example usage
In order to load `gguf` files in `transformers`, you should specify the `gguf_file` argument to the `from_pretrained`
methods of both tokenizers and models. Here is how one would load a tokenizer and a model, which can be loaded
from the exact same file:
```py
from transformers import AutoTokenizer, AutoModelForCausalLM
model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
filename = "tinyllama-1.1b-chat-v1.0.Q6_K.gguf"
tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=filename)
model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=filename)
```
Now you have access to the full, unquantized version of the model in the PyTorch ecosystem, where you can combine it
with a plethora of other tools.
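For instance, here is a minimal usage sketch with the dequantized model loaded above (the prompt is arbitrary):
```py
# sketch: run a quick generation with the dequantized model
inputs = tokenizer("The capital of France is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```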
In order to convert back to a `gguf` file, we recommend using the
[`convert_hf_to_gguf.py` script](https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py) from llama.cpp.
Here's how you would complete the script above to save the model and export it back to `gguf`:
```py
tokenizer.save_pretrained("directory")
model.save_pretrained("directory")

# run the llama.cpp conversion script (notebook-style shell escape; drop the "!" in a regular shell)
!python ${path_to_llama_cpp}/convert_hf_to_gguf.py ${directory}
```
| |
181402
|
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Agents and tools
[[open-in-colab]]
### What is an agent?
Large Language Models (LLMs) trained to perform [causal language modeling](./tasks/language_modeling) can tackle a wide range of tasks, but they often struggle with basic tasks like logic, calculation, and search. When prompted in domains in which they do not perform well, they often fail to generate the answer we expect them to.
One approach to overcome this weakness is to create an *agent*.
An agent is a system that uses an LLM as its engine, and it has access to functions called *tools*.
These *tools* are functions for performing a task, and they contain all the necessary descriptions for the agent to properly use them.
The agent can be programmed to:
- devise a series of actions/tools and run them all at once, like the [`CodeAgent`]
- plan and execute actions/tools one by one and wait for the outcome of each action before launching the next one, like the [`ReactJsonAgent`]
### Types of agents
#### Code agent
This agent has a planning step, then generates python code to execute all its actions at once. It natively handles different input and output types for its tools, thus it is the recommended choice for multimodal tasks.
#### React agents
This is the go-to agent to solve reasoning tasks, since the ReAct framework ([Yao et al., 2022](https://huggingface.co/papers/2210.03629)) makes it really efficient to think on the basis of its previous observations.
We implement two versions of the ReAct agent:
- [`ReactJsonAgent`] generates tool calls as JSON in its output.
- [`ReactCodeAgent`] is a new type of ReAct agent that generates its tool calls as blobs of code, which works really well for LLMs that have strong coding performance.
> [!TIP]
> Read [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) blog post to learn more about ReAct agents.
<div class="flex justify-center">
<img
class="block dark:hidden"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif"
/>
<img
class="hidden dark:block"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif"
/>
</div>

For example, here is how a ReAct Code agent would work its way through the following question.
```py3
>>> agent.run(
... "How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need?",
... )
=====New task=====
How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need?
====Agent is executing the code below:
bert_blocks = search(query="number of blocks in BERT base encoder")
print("BERT blocks:", bert_blocks)
====
Print outputs:
BERT blocks: twelve encoder blocks
====Agent is executing the code below:
attention_layer = search(query="number of layers in Attention is All You Need")
print("Attention layers:", attention_layer)
====
Print outputs:
Attention layers: Encoder: The encoder is composed of a stack of N = 6 identical layers. Each layer has two sub-layers. The first is a multi-head self-attention mechanism, and the second is a simple, position- 2 Page 3 Figure 1: The Transformer - model architecture.
====Agent is executing the code below:
bert_blocks = 12
attention_layers = 6
diff = bert_blocks - attention_layers
print("Difference in blocks:", diff)
final_answer(diff)
====
Print outputs:
Difference in blocks: 6
Final answer: 6
```
###
| |
181437
|
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# PyTorch training on Apple silicon
Previously, training models on a Mac was limited to the CPU only. With the release of PyTorch v1.12, you can take advantage of training models with Apple's silicon GPUs for significantly faster performance and training. This is powered in PyTorch by integrating Apple's Metal Performance Shaders (MPS) as a backend. The [MPS backend](https://pytorch.org/docs/stable/notes/mps.html) implements PyTorch operations as custom Metal shaders and places these modules on a `mps` device.
<Tip warning={true}>
Some PyTorch operations are not implemented in MPS yet and will throw an error. To avoid this, you should set the environment variable `PYTORCH_ENABLE_MPS_FALLBACK=1` to use the CPU kernels instead (you'll still see a `UserWarning`).
<br>
If you run into any other errors, please open an issue in the [PyTorch](https://github.com/pytorch/pytorch/issues) repository because the [`Trainer`] only integrates the MPS backend.
</Tip>
With the `mps` device set, you can:
* train larger networks or batch sizes locally
* reduce data retrieval latency because the GPU's unified memory architecture allows direct access to the full memory store
* reduce costs because you don't need to train on cloud-based GPUs or add additional local GPUs
Get started by making sure you have PyTorch installed. MPS acceleration is supported on macOS 12.3+.
```bash
pip install torch torchvision torchaudio
```
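As an optional sanity check, you can verify that PyTorch sees the MPS backend before training:
```py
import torch

# both should print True on a supported Apple silicon setup
print(torch.backends.mps.is_available())  # the MPS device is usable at runtime
print(torch.backends.mps.is_built())      # PyTorch was compiled with MPS support
```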
[`TrainingArguments`] uses the `mps` device by default if it's available which means you don't need to explicitly set the device. For example, you can run the [run_glue.py](https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py) script with the MPS backend automatically enabled without making any changes.
```diff
export TASK_NAME=mrpc
python examples/pytorch/text-classification/run_glue.py \
--model_name_or_path google-bert/bert-base-cased \
--task_name $TASK_NAME \
- --use_mps_device \
--do_train \
--do_eval \
--max_seq_length 128 \
--per_device_train_batch_size 32 \
--learning_rate 2e-5 \
--num_train_epochs 3 \
--output_dir /tmp/$TASK_NAME/ \
--overwrite_output_dir
```
Backends for [distributed setups](https://pytorch.org/docs/stable/distributed.html#backends) like `gloo` and `nccl` are not supported by the `mps` device which means you can only train on a single GPU with the MPS backend.
You can learn more about the MPS backend in the [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/) blog post.
| |
181440
|
# Load adapters with 🤗 PEFT
[[open-in-colab]]
[Parameter-Efficient Fine Tuning (PEFT)](https://huggingface.co/blog/peft) methods freeze the pretrained model parameters during fine-tuning and add a small number of trainable parameters (the adapters) on top of it. The adapters are trained to learn task-specific information. This approach has been shown to be very memory-efficient with lower compute usage while producing results comparable to a fully fine-tuned model.
Adapters trained with PEFT are also usually an order of magnitude smaller than the full model, making it convenient to share, store, and load them.
<div class="flex flex-col justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/PEFT-hub-screenshot.png"/>
<figcaption class="text-center">The adapter weights for an OPTForCausalLM model stored on the Hub are only ~6MB compared to the full size of the model weights, which can be ~700MB.</figcaption>
</div>
If you're interested in learning more about the 🤗 PEFT library, check out the [documentation](https://huggingface.co/docs/peft/index).
## Setup
Get started by installing 🤗 PEFT:
```bash
pip install peft
```
If you want to try out the brand new features, you might be interested in installing the library from source:
```bash
pip install git+https://github.com/huggingface/peft.git
```
## Supported PEFT models
🤗 Transformers natively supports some PEFT methods, meaning you can load adapter weights stored locally or on the Hub and easily run or train them with a few lines of code. The following methods are supported:
- [Low Rank Adapters](https://huggingface.co/docs/peft/conceptual_guides/lora)
- [IA3](https://huggingface.co/docs/peft/conceptual_guides/ia3)
- [AdaLoRA](https://arxiv.org/abs/2303.10512)
If you want to use other PEFT methods, such as prompt learning or prompt tuning, or learn about the 🤗 PEFT library in general, please refer to the [documentation](https://huggingface.co/docs/peft/index).
## Load a PEFT adapter
To load and use a PEFT adapter model from 🤗 Transformers, make sure the Hub repository or local directory contains an `adapter_config.json` file and the adapter weights, as shown in the example image above. Then you can load the PEFT adapter model using the `AutoModelFor` class. For example, to load a PEFT adapter model for causal language modeling:
1. specify the PEFT model id
2. pass it to the [`AutoModelForCausalLM`] class
```py
from transformers import AutoModelForCausalLM, AutoTokenizer
peft_model_id = "ybelkada/opt-350m-lora"
model = AutoModelForCausalLM.from_pretrained(peft_model_id)
```
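As a quick usage sketch (the prompt is arbitrary; the tokenizer comes from the adapter's base model, `facebook/opt-350m`):
```py
# generate with the adapted model
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
inputs = tokenizer("Hello, my name is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```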
<Tip>
You can load a PEFT adapter with either an `AutoModelFor` class or the base model class like `OPTForCausalLM` or `LlamaForCausalLM`.
</Tip>
You can also load a PEFT adapter by calling the `load_adapter` method:
```py
from transformers import AutoModelForCausalLM, AutoTokenizer
model_id = "facebook/opt-350m"
peft_model_id = "ybelkada/opt-350m-lora"
model = AutoModelForCausalLM.from_pretrained(model_id)
model.load_adapter(peft_model_id)
```
Check out the [API documentation](#transformers.integrations.PeftAdapterMixin) section below for more details.
## Load in 8bit or 4bit
The `bitsandbytes` integration supports 8bit and 4bit precision data types, which are useful for loading large models because it saves memory (see the `bitsandbytes` integration [guide](./quantization#bitsandbytes-integration) to learn more). Pass a `BitsAndBytesConfig` with `load_in_8bit` or `load_in_4bit` to [`~PreTrainedModel.from_pretrained`] and set `device_map="auto"` to effectively distribute the model across your hardware:
```py
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
peft_model_id = "ybelkada/opt-350m-lora"
model = AutoModelForCausalLM.from_pretrained(peft_model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True))
```
## Add a new adapter
You can use [`~peft.PeftModel.add_adapter`] to add a new adapter to a model with an existing adapter as long as the new adapter is the same type as the current one. For example, if you have an existing LoRA adapter attached to a model:
```py
from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer
from peft import LoraConfig
model_id = "facebook/opt-350m"
model = AutoModelForCausalLM.from_pretrained(model_id)
lora_config = LoraConfig(
target_modules=["q_proj", "k_proj"],
init_lora_weights=False
)
model.add_adapter(lora_config, adapter_name="adapter_1")
```
To add a new adapter:
```py
# attach new adapter with same config
model.add_adapter(lora_config, adapter_name="adapter_2")
```
Now you can use [`~peft.PeftModel.set_adapter`] to set which adapter to use:
```py
# use adapter_1
model.set_adapter("adapter_1")
output_disabled = model.generate(**inputs)
print(tokenizer.decode(output_disabled[0], skip_special_tokens=True))
# use adapter_2
model.set_adapter("adapter_2")
output_enabled = model.generate(**inputs)
print(tokenizer.decode(output_enabled[0], skip_special_tokens=True))
```
## Enable and disable adapters
Once you've added an adapter to a model, you can enable or disable the adapter module. To enable the adapter module:
```py
from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer
from peft import PeftConfig
model_id = "facebook/opt-350m"
adapter_model_id = "ybelkada/opt-350m-lora"
tokenizer = AutoTokenizer.from_pretrained(model_id)
text = "Hello"
inputs = tokenizer(text, return_tensors="pt")
model = AutoModelForCausalLM.from_pretrained(model_id)
peft_config = PeftConfig.from_pretrained(adapter_model_id)
# initialize the adapter with random weights
peft_config.init_lora_weights = False
model.add_adapter(peft_config)
model.enable_adapters()
output = model.generate(**inputs)
```
To disable the adapter module:
```py
model.disable_adapters()
output = model.generate(**inputs)
```
## Train a PEFT adapter
PEFT adapters are supported by the [`Trainer`] class so that you can train an adapter for your specific use case. It only requires adding a few more lines of code. For example, to train a LoRA adapter:
<Tip>
If you aren't familiar with fine-tuning a model with [`Trainer`], take a look at the [Fine-tune a pretrained model](training) tutorial.
</Tip>
1. Define your adapter configuration with the task type and hyperparameters (see [`~peft.LoraConfig`] for more details about what the hyperparameters do).
```py
from peft import LoraConfig
peft_config = LoraConfig(
lora_alpha=16,
lora_dropout=0.1,
r=64,
bias="none",
task_type="CAUSAL_LM",
)
```
2. Add adapter to the model.
```py
model.add_adapter(peft_config)
```
3. Now you can pass the model to [`Trainer`]!
```py
trainer = Trainer(model=model, ...)
trainer.train()
```
To save your trained adapter and load it back:
```py
model.save_pretrained(save_dir)
model = AutoModelForCausalLM.from_pretrained(save_dir)
```
## Add additional trainable layers to a PEFT adapter
You can also fine-tune additional trainable adapters on top of a model that has adapters attached by passing `modules_to_save` in your PEFT config. For example, if you want to also fine-tune the lm_head on top of a model with a LoRA adapter:
```py
from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer
from peft import LoraConfig
model_id = "facebook/opt-350m"
model = AutoModelForCausalLM.from_pretrained(model_id)
lora_config = LoraConfig(
target_modules=["q_proj", "k_proj"],
modules_to_save=["lm_head"],
)
model.add_adapter(lora_config)
```
## API docs
[[autodoc]] integrations.PeftAdapterMixin
- load_adapter
- add_adapter
- set_adapter
- disable_adapters
- enable_adapters
- active_adapters
- get_adapter_state_dict
<!--
TODO: (@younesbelkada @stevhliu)
- Link to PEFT docs for further details
- Trainer
- 8-bit / 4-bit examples ?
-->
| |
181529
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# LLM prompting guide
[[open-in-colab]]
Large Language Models such as Falcon, LLaMA, etc. are pretrained transformer models initially trained to predict the
next token given some input text. They typically have billions of parameters and have been trained on trillions of
tokens for an extended period of time. As a result, these models become quite powerful and versatile, and you can use
them to solve multiple NLP tasks out of the box by instructing the models with natural language prompts.
Designing such prompts to ensure the optimal output is often called "prompt engineering". Prompt engineering is an
iterative process that requires a fair amount of experimentation. Natural languages are much more flexible and expressive
than programming languages; however, they can also introduce some ambiguity. At the same time, prompts in natural language
are quite sensitive to changes: even minor modifications can lead to wildly different outputs.
While there is no exact recipe for creating prompts to match all cases, researchers have worked out a number of best
practices that help to achieve optimal results more consistently.
This guide covers the prompt engineering best practices to help you craft better LLM prompts and solve various NLP tasks.
You'll learn:
- [Basics of prompting](#basics-of-prompting)
- [Best practices of LLM prompting](#best-practices-of-llm-prompting)
- [Advanced prompting techniques: few-shot prompting and chain-of-thought](#advanced-prompting-techniques)
- [When to fine-tune instead of prompting](#prompting-vs-fine-tuning)
<Tip>
Prompt engineering is only a part of the LLM output optimization process. Another essential component is choosing the
optimal text generation strategy. You can customize how your LLM selects each of the subsequent tokens when generating
the text without modifying any of the trainable parameters. By tweaking the text generation parameters, you can reduce
repetition in the generated text and make it more coherent and human-sounding.
Text generation strategies and parameters are out of scope for this guide, but you can learn more about these topics in
the following guides:
* [Generation with LLMs](../llm_tutorial)
* [Text generation strategies](../generation_strategies)
</Tip>
## Basics of prompting
### Types of models
The majority of modern LLMs are decoder-only transformers. Some examples include: [LLaMA](../model_doc/llama),
[Llama2](../model_doc/llama2), [Falcon](../model_doc/falcon), [GPT2](../model_doc/gpt2). However, you may encounter
encoder-decoder transformer LLMs as well, for instance, [Flan-T5](../model_doc/flan-t5) and [BART](../model_doc/bart).
Encoder-decoder-style models are typically used in generative tasks where the output **heavily** relies on the input, for
example, in translation and summarization. The decoder-only models are used for all other types of generative tasks.
When using a pipeline to generate text with an LLM, it's important to know what type of LLM you are using, because
they use different pipelines.
Run inference with decoder-only models with the `text-generation` pipeline:
```python
>>> from transformers import pipeline
>>> import torch
>>> torch.manual_seed(0) # doctest: +IGNORE_RESULT
>>> generator = pipeline('text-generation', model = 'openai-community/gpt2')
>>> prompt = "Hello, I'm a language model"
>>> generator(prompt, max_length = 30)
[{'generated_text': "Hello, I'm a language model programmer so you can use some of my stuff. But you also need some sort of a C program to run."}]
```
To run inference with an encoder-decoder, use the `text2text-generation` pipeline:
```python
>>> text2text_generator = pipeline("text2text-generation", model = 'google/flan-t5-base')
>>> prompt = "Translate from English to French: I'm very happy to see you"
>>> text2text_generator(prompt)
[{'generated_text': 'Je suis très heureuse de vous rencontrer.'}]
```
### Base vs instruct/chat models
Most of the recent LLM checkpoints available on 🤗 Hub come in two versions: base and instruct (or chat). For example,
[`tiiuae/falcon-7b`](https://huggingface.co/tiiuae/falcon-7b) and [`tiiuae/falcon-7b-instruct`](https://huggingface.co/tiiuae/falcon-7b-instruct).
Base models are excellent at completing the text when given an initial prompt, however, they are not ideal for NLP tasks
where they need to follow instructions, or for conversational use. This is where the instruct (chat) versions come in.
These checkpoints are the result of further fine-tuning of the pre-trained base versions on instructions and conversational data.
This additional fine-tuning makes them a better choice for many NLP tasks.
Let's illustrate some simple prompts that you can use with [`tiiuae/falcon-7b-instruct`](https://huggingface.co/tiiuae/falcon-7b-instruct)
to solve some common NLP tasks.
### NLP
| |
181531
|
#### Reasoning
Reasoning is one of the most difficult tasks for LLMs, and achieving good results often requires applying advanced prompting techniques, like
[Chain-of-thought](#chain-of-thought).
Let's see if we can make a model reason about a simple arithmetic task with a basic prompt:
```python
>>> torch.manual_seed(5) # doctest: +IGNORE_RESULT
>>> prompt = """There are 5 groups of students in the class. Each group has 4 students. How many students are there in the class?"""
>>> sequences = pipe(
... prompt,
... max_new_tokens=30,
... do_sample=True,
... top_k=10,
... return_full_text = False,
... )
>>> for seq in sequences:
... print(f"Result: {seq['generated_text']}")
Result:
There are a total of 5 groups, so there are 5 x 4=20 students in the class.
```
Correct! Let's increase the complexity a little and see if we can still get away with a basic prompt:
```python
>>> torch.manual_seed(6) # doctest: +IGNORE_RESULT
>>> prompt = """I baked 15 muffins. I ate 2 muffins and gave 5 muffins to a neighbor. My partner then bought 6 more muffins and ate 2. How many muffins do we now have?"""
>>> sequences = pipe(
... prompt,
... max_new_tokens=10,
... do_sample=True,
... top_k=10,
... return_full_text = False,
... )
>>> for seq in sequences:
... print(f"Result: {seq['generated_text']}")
Result:
The total number of muffins now is 21
```
This is a wrong answer; it should be 12. In this case, this can be due to the prompt being too basic, or due to the choice
of model; after all, we picked the smallest version of Falcon. Reasoning is difficult for models of all sizes, but larger
models are likely to perform better.
## Best practices of LLM prompting
In this section of the guide we have compiled a list of best practices that tend to improve the prompt results:
* When choosing the model to work with, the latest and most capable models are likely to perform better.
* Start with a simple and short prompt, and iterate from there.
* Put the instructions at the beginning of the prompt, or at the very end. When working with large context, models apply various optimizations to prevent Attention complexity from scaling quadratically. This may make a model more attentive to the beginning or end of a prompt than the middle.
* Clearly separate instructions from the text they apply to - more on this in the next section.
* Be specific and descriptive about the task and the desired outcome - its format, length, style, language, etc.
* Avoid ambiguous descriptions and instructions.
* Favor instructions that say "what to do" instead of those that say "what not to do".
* "Lead" the output in the right direction by writing the first word (or even begin the first sentence for the model).
* Use advanced techniques like [Few-shot prompting](#few-shot-prompting) and [Chain-of-thought](#chain-of-thought)
* Test your prompts with different models to assess their robustness.
* Version and track the performance of your prompts.
## Advanced prompting techniques
### Few-shot prompting
The basic prompts in the sections above are examples of "zero-shot" prompts, meaning the model has been given
instructions and context, but no examples with solutions. LLMs that have been fine-tuned on instruction datasets generally
perform well on such "zero-shot" tasks. However, you may find that your task has more complexity or nuance, and perhaps
you have some requirements for the output that the model doesn't catch on to just from the instructions. In this case, you can
try the technique called few-shot prompting.
In few-shot prompting, we provide examples in the prompt giving the model more context to improve the performance.
The examples condition the model to generate the output following the patterns in the examples.
Here's an example:
```python
>>> torch.manual_seed(0) # doctest: +IGNORE_RESULT
>>> prompt = """Text: The first human went into space and orbited the Earth on April 12, 1961.
... Date: 04/12/1961
... Text: The first-ever televised presidential debate in the United States took place on September 28, 1960, between presidential candidates John F. Kennedy and Richard Nixon.
... Date:"""
>>> sequences = pipe(
... prompt,
... max_new_tokens=8,
... do_sample=True,
... top_k=10,
... )
>>> for seq in sequences:
... print(f"Result: {seq['generated_text']}")
Result: Text: The first human went into space and orbited the Earth on April 12, 1961.
Date: 04/12/1961
Text: The first-ever televised presidential debate in the United States took place on September 28, 1960, between presidential candidates John F. Kennedy and Richard Nixon.
Date: 09/28/1960
```
In the above code snippet we used a single example to demonstrate the desired output to the model, so this can be called
"one-shot" prompting. However, depending on the task complexity, you may need to use more than one example.
Limitations of the few-shot prompting technique:
- While LLMs can pick up on the patterns in the examples, this technique doesn't work well on complex reasoning tasks.
- Few-shot prompting requires creating lengthy prompts. Prompts with a large number of tokens can increase computation and latency. There's also a limit to the length of the prompts.
- Sometimes, when given a number of examples, models can learn patterns that you didn't intend them to learn, e.g. that the third movie review is always negative.
### Chain-of-thought
Chain-of-thought (CoT) prompting is a technique that nudges a model to produce intermediate reasoning steps, thus improving
the results on complex reasoning tasks.
There are two ways of steering a model toward producing the reasoning steps:
- few-shot prompting, by illustrating examples with detailed answers to questions, showing the model how to work through a problem.
- instructing the model to reason, by adding phrases like "Let's think step by step" or "Take a deep breath and work through the problem step by step."
If we apply the CoT technique to the muffins example from the [reasoning section](#reasoning) and use a larger model,
such as `tiiuae/falcon-180B-chat` (which you can play with in [HuggingChat](https://huggingface.co/chat/)),
we'll get a significant improvement on the reasoning result:
```text
Let's go through this step-by-step:
1. You start with 15 muffins.
2. You eat 2 muffins, leaving you with 13 muffins.
3. You give 5 muffins to your neighbor, leaving you with 8 muffins.
4. Your partner buys 6 more muffins, bringing the total number of muffins to 14.
5. Your partner eats 2 muffins, leaving you with 12 muffins.
Therefore, you now have 12 muffins.
```
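To try something similar with the smaller pipeline used earlier in this guide, a minimal sketch could look like this (the output will vary; the generation parameters simply mirror the earlier examples):
```python
# sketch: zero-shot chain-of-thought with the same small pipeline used above
prompt = (
    "I baked 15 muffins. I ate 2 muffins and gave 5 muffins to a neighbor. "
    "My partner then bought 6 more muffins and ate 2. How many muffins do we now have? "
    "Let's think step by step."
)
sequences = pipe(prompt, max_new_tokens=80, do_sample=True, top_k=10, return_full_text=False)
print(sequences[0]["generated_text"])
```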
## Prompting vs fine-tuning
You can achieve great results by optimizing your prompts, however, you may still ponder whether fine-tuning a model
would work better for your case. Here are some scenarios when fine-tuning a smaller model may be a preferred option:
- Your domain is wildly different from what LLMs were pre-trained on and extensive prompt optimization did not yield sufficient results.
- You need your model to work well in a low-resource language.
- You need the model to be trained on sensitive data that is under strict regulations.
- You have to use a small model due to cost, privacy, infrastructure or other limitations.
In all of the above examples, you will need to make sure that you either already have or can easily obtain a large enough
domain-specific dataset at a reasonable cost to fine-tune a model. You will also need to have enough time and resources
to fine-tune a model.
If the above examples are not the case for you, optimizing prompts can prove to be more beneficial.
| |
181580
|
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# LLaMA
## Overview
The LLaMA model was proposed in [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample. It is a collection of foundation language models ranging from 7B to 65B parameters.
The abstract from the paper is the following:
*We introduce LLaMA, a collection of foundation language models ranging from 7B to 65B parameters. We train our models on trillions of tokens, and show that it is possible to train state-of-the-art models using publicly available datasets exclusively, without resorting to proprietary and inaccessible datasets. In particular, LLaMA-13B outperforms GPT-3 (175B) on most benchmarks, and LLaMA-65B is competitive with the best models, Chinchilla-70B and PaLM-540B. We release all our models to the research community.*
This model was contributed by [zphang](https://huggingface.co/zphang) with contributions from [BlackSamorez](https://huggingface.co/BlackSamorez). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama).
## Usage tips
- Weights for the LLaMA models can be obtained by filling out [this form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform?usp=send_form)
- After downloading the weights, they will need to be converted to the Hugging Face Transformers format using the [conversion script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py). The script can be called with the following (example) command:
```bash
python src/transformers/models/llama/convert_llama_weights_to_hf.py \
--input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
```
- After conversion, the model and tokenizer can be loaded via:
```python
from transformers import LlamaForCausalLM, LlamaTokenizer
tokenizer = LlamaTokenizer.from_pretrained("/output/path")
model = LlamaForCausalLM.from_pretrained("/output/path")
```
Note that executing the script requires enough CPU RAM to host the whole model in float16 precision (even though the biggest versions
come in several checkpoints, each checkpoint contains only a part of each weight of the model, so we need to load them all in RAM). For the 65B model, that means 130GB of RAM is needed.
- The LLaMA tokenizer is a BPE model based on [sentencepiece](https://github.com/google/sentencepiece). One quirk of sentencepiece is that when decoding a sequence, if the first token is the start of the word (e.g. "Banana"), the tokenizer does not prepend the prefix space to the string.
The Flax version of the implementation was contributed by [afmck](https://huggingface.co/afmck) with the code in the implementation based on Hugging Face's Flax GPT-Neo.
Based on the original LLaMA model, Meta AI has released some follow-up works:
- **Llama2**: Llama2 is an improved version of Llama with some architectural tweaks (Grouped Query Attention), and is pretrained on 2 trillion tokens. Refer to the documentation of Llama2 which can be found [here](llama2).
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LLaMA. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
<PipelineTag pipeline="text-classification"/>
- A [notebook](https://colab.research.google.com/github/bigscience-workshop/petals/blob/main/examples/prompt-tuning-sst2.ipynb#scrollTo=f04ba4d2) on how to use prompt tuning to adapt the LLaMA model for the text classification task. 🌎
<PipelineTag pipeline="question-answering"/>
- [StackLLaMA: A hands-on guide to train LLaMA with RLHF](https://huggingface.co/blog/stackllama#stackllama-a-hands-on-guide-to-train-llama-with-rlhf), a blog post about how to train LLaMA to answer questions on [Stack Exchange](https://stackexchange.com/) with RLHF.
⚗️ Optimization
- A [notebook](https://colab.research.google.com/drive/1SQUXq1AMZPSLD4mk3A3swUIc6Y2dclme?usp=sharing) on how to fine-tune LLaMA model using xturing library on GPU which has limited memory. 🌎
⚡️ Inference
- A [notebook](https://colab.research.google.com/github/DominguesM/alpaca-lora-ptbr-7b/blob/main/notebooks/02%20-%20Evaluate.ipynb) on how to run the LLaMA Model using PeftModel from the 🤗 PEFT library. 🌎
- A [notebook](https://colab.research.google.com/drive/1l2GiSSPbajVyp2Nk3CFT4t3uH6-5TiBe?usp=sharing) on how to load a PEFT adapter LLaMA model with LangChain. 🌎
🚀 Deploy
- A [notebook](https://colab.research.google.com/github/lxe/simple-llama-finetuner/blob/master/Simple_LLaMA_FineTuner.ipynb#scrollTo=3PM_DilAZD8T) on how to fine-tune LLaMA model using LoRA method via the 🤗 PEFT library with intuitive UI. 🌎
- A [notebook](https://github.com/aws/amazon-sagemaker-examples/blob/main/introduction_to_amazon_algorithms/jumpstart-foundation-models/text-generation-open-llama.ipynb) on how to deploy Open-LLaMA model for text generation on Amazon SageMaker. 🌎
## LlamaConfig
[[autodoc]] LlamaConfig
## LlamaTokenizer
[[autodoc]] LlamaTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## LlamaTokenizerFast
[[autodoc]] LlamaTokenizerFast
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- update_post_processor
- save_vocabulary
## LlamaModel
[[autodoc]] LlamaModel
- forward
## LlamaForCausalLM
[[autodoc]] LlamaForCausalLM
- forward
## LlamaForSequenceClassification
[[autodoc]] LlamaForSequenceClassification
- forward
## LlamaForQuestionAnswering
[[autodoc]] LlamaForQuestionAnswering
- forward
## LlamaForTokenClassification
[[autodoc]] LlamaForTokenClassification
- forward
## FlaxLlamaModel
[[autodoc]] FlaxLlamaModel
- __call__
## FlaxLlamaForCausalLM
[[autodoc]] FlaxLlamaForCausalLM
- __call__
| |
181843
|
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# MarianMT
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=marian">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-marian-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/opus-mt-zh-en">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## Overview
A framework for translation models, using the same models as BART. Translations should be similar to, but not identical to, the output in the test set linked in each model card.
This model was contributed by [sshleifer](https://huggingface.co/sshleifer).
## Implementation Notes
- Each model is about 298 MB on disk; there are more than 1,000 models.
- The list of supported language pairs can be found [here](https://huggingface.co/Helsinki-NLP).
- Models were originally trained by [Jörg Tiedemann](https://researchportal.helsinki.fi/en/persons/j%C3%B6rg-tiedemann) using the [Marian](https://marian-nmt.github.io/) C++ library, which supports fast training and translation.
- All models are transformer encoder-decoders with 6 layers in each component. Each model's performance is documented
in a model card.
- The 80 opus models that require BPE preprocessing are not supported.
- The modeling code is the same as [`BartForConditionalGeneration`] with a few minor modifications:
- static (sinusoid) positional embeddings (`MarianConfig.static_position_embeddings=True`)
- no layernorm_embedding (`MarianConfig.normalize_embedding=False`)
  - the model starts generating with `pad_token_id` (which has 0 as a token_embedding) as the prefix (Bart uses `</s>`),
- Code to bulk convert models can be found in `convert_marian_to_pytorch.py`.
## Naming
- All model names use the following format: `Helsinki-NLP/opus-mt-{src}-{tgt}`
- The language codes used to name models are inconsistent. Two-digit codes can usually be found [here](https://developers.google.com/admin-sdk/directory/v1/languages); three-digit codes require googling "language
  code {code}".
- Codes formatted like `es_AR` are usually `code_{region}`. That one is Spanish from Argentina.
- The models were converted in two stages. The first 1000 models use ISO-639-2 codes to identify languages, while the second
  group uses a combination of ISO-639-5 and ISO-639-2 codes.
## Examples
- Since Marian models are smaller than many other translation models available in the library, they can be useful for
fine-tuning experiments and integration tests.
- [Fine-tune on GPU](https://github.com/huggingface/transformers/blob/master/examples/legacy/seq2seq/train_distil_marian_enro.sh)
## Multilingual Models
- All model names use the following format: `Helsinki-NLP/opus-mt-{src}-{tgt}`:
- If a model can output multiple languages, you should specify a language code by prepending the desired output
  language to the `src_text`.
- You can see a model's supported language codes in its model card, under target constituents, like in [opus-mt-en-roa](https://huggingface.co/Helsinki-NLP/opus-mt-en-roa).
- Note that if a model is only multilingual on the source side, like `Helsinki-NLP/opus-mt-roa-en`, no language
codes are required.
New multi-lingual models from the [Tatoeba-Challenge repo](https://github.com/Helsinki-NLP/Tatoeba-Challenge)
require 3 character language codes:
```python
>>> from transformers import MarianMTModel, MarianTokenizer
>>> src_text = [
... ">>fra<< this is a sentence in english that we want to translate to french",
... ">>por<< This should go to portuguese",
... ">>esp<< And this to Spanish",
... ]
>>> model_name = "Helsinki-NLP/opus-mt-en-roa"
>>> tokenizer = MarianTokenizer.from_pretrained(model_name)
>>> print(tokenizer.supported_language_codes)
['>>zlm_Latn<<', '>>mfe<<', '>>hat<<', '>>pap<<', '>>ast<<', '>>cat<<', '>>ind<<', '>>glg<<', '>>wln<<', '>>spa<<', '>>fra<<', '>>ron<<', '>>por<<', '>>ita<<', '>>oci<<', '>>arg<<', '>>min<<']
>>> model = MarianMTModel.from_pretrained(model_name)
>>> translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True))
>>> [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
["c'est une phrase en anglais que nous voulons traduire en français",
'Isto deve ir para o português.',
'Y esto al español']
```
Here is the code to see all available pretrained models on the hub:
```python
from huggingface_hub import list_models

# query only the Helsinki-NLP organization instead of listing every model on the Hub
org = "Helsinki-NLP"
model_ids = [x.id for x in list_models(author=org)]
suffix = [x.split("/")[1] for x in model_ids]
old_style_multi_models = [f"{org}/{s}" for s in suffix if s != s.lower()]
```
## Old S
| |
181874
|
# Chameleon
## Overview
The Chameleon model was proposed in [Chameleon: Mixed-Modal Early-Fusion Foundation Models](https://arxiv.org/abs/2405.09818v1) by the META AI Chameleon Team. Chameleon is a vision-language model that uses vector quantization to tokenize images, which enables the model to generate multimodal output. The model takes images and text as input, including in an interleaved format, and generates a textual response. The image generation module has not been released yet.
The abstract from the paper is the following:
*We present Chameleon, a family of early-fusion token-based mixed-modal models capable of understanding and generating images and text in any arbitrary sequence. We outline a stable training
approach from inception, an alignment recipe, and an architectural parameterization tailored for the
early-fusion, token-based, mixed-modal setting. The models are evaluated on a comprehensive range
of tasks, including visual question answering, image captioning, text generation, image generation, and
long-form mixed modal generation. Chameleon demonstrates broad and general capabilities, including
state-of-the-art performance in image captioning tasks, outperforms Llama-2 in text-only tasks while
being competitive with models such as Mixtral 8x7B and Gemini-Pro, and performs non-trivial image
generation, all in a single model. It also matches or exceeds the performance of much larger models,
including Gemini Pro and GPT-4V, according to human judgments on a new long-form mixed-modal
generation evaluation, where either the prompt or outputs contain mixed sequences of both images and
text. Chameleon marks a significant step forward in unified modeling of full multimodal documents*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/chameleon_arch.png"
alt="drawing" width="600"/>
<small> Chameleon incorporates a vector quantizer module to transform images into discrete tokens. That also enables image generation using an auto-regressive transformer. Taken from the <a href="https://arxiv.org/abs/2405.09818v1">original paper.</a> </small>
This model was contributed by [joaogante](https://huggingface.co/joaogante) and [RaushanTurganbay](https://huggingface.co/RaushanTurganbay).
The original code can be found [here](https://github.com/facebookresearch/chameleon).
## Usage tips
- We advise users to use `padding_side="left"` when computing batched generation as it leads to more accurate results. Simply make sure to set `processor.tokenizer.padding_side = "left"` before generating.
- Note that Chameleon was tuned for safety alignment. If the model is refusing to answer, consider asking a more concrete question, instead of an open question.
- Chameleon generates in chat format, which means that the generated text will always be the "assistant's turn". You can enable text completion generation by passing `return_for_text_completion=True` when calling the processor (see the short sketch after these tips).
> [!NOTE]
> The Chameleon implementation in Transformers uses a special image token to indicate where to merge image embeddings. Rather than adding a new token, it reuses one of the reserved tokens: `<reserved08707>`. You have to add `<image>` to your prompt in the place where the image should be embedded for correct generation.
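The snippet below is a minimal sketch that applies the batched-generation and text-completion tips above; it assumes the `facebook/chameleon-7b` checkpoint used in the examples that follow and that the processor accepts text-only inputs:
```python
from transformers import ChameleonProcessor

processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")

# left padding gives more accurate results for batched generation
processor.tokenizer.padding_side = "left"

# opt out of the chat-style "assistant turn" and do plain text completion
inputs = processor(
    text=["A red stop sign is", "Two cats are lying on a"],
    padding=True,
    return_tensors="pt",
    return_for_text_completion=True,
)
```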
## Usage example
### Single image inference
Chameleon is a gated model, so make sure you have access and are logged in to the Hugging Face Hub with a token.
Here's how to load the model and perform inference in half-precision (`torch.bfloat16`):
```python
from transformers import ChameleonProcessor, ChameleonForConditionalGeneration
import torch
from PIL import Image
import requests
processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.bfloat16, device_map="cuda")
# prepare image and text prompt
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)
prompt = "What do you see in this image?<image>"
inputs = processor(images=image, text=prompt, return_tensors="pt").to(model.device, dtype=torch.bfloat16)
# autoregressively complete prompt
output = model.generate(**inputs, max_new_tokens=50)
print(processor.decode(output[0], skip_special_tokens=True))
```
### Multi image inference
Chameleon can perform inference with multiple images as input, where images either belong to the same prompt or different prompts (in batched inference). Here is how you can do it:
```python
from transformers import ChameleonProcessor, ChameleonForConditionalGeneration
import torch
from PIL import Image
import requests
processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.bfloat16, device_map="cuda")
# Get three different images
url = "https://www.ilankelman.org/stopsigns/australia.jpg"
image_stop = Image.open(requests.get(url, stream=True).raw)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image_cats = Image.open(requests.get(url, stream=True).raw)
url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg"
image_snowman = Image.open(requests.get(url, stream=True).raw)
# Prepare a batched prompt, where the first one is a multi-image prompt and the second is not
prompts = [
"What do these images have in common?<image><image>",
"<image>What is shown in this image?"
]
# We can simply feed images in the order they have to be used in the text prompt
# Each "<image>" token uses one image leaving the next for the subsequent "<image>" tokens
inputs = processor(images=[image_stop, image_cats, image_snowman], text=prompts, padding=True, return_tensors="pt").to(device="cuda", dtype=torch.bfloat16)
# Generate
generate_ids = model.generate(**inputs, max_new_tokens=50)
processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
```
## Model optimization
### Quantization using Bitsandbytes
The model can be loaded in 8 or 4 bits, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes (`pip install bitsandbytes`) and to have access to a GPU/accelerator that is supported by the library.
<Tip>
bitsandbytes is being refactored to support multiple backends beyond CUDA. Currently, ROCm (AMD GPU) and Intel CPU implementations are mature, with Intel XPU in progress and Apple Silicon support expected by Q4/Q1. For installation instructions and the latest backend updates, visit [this link](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend).
We value your feedback to help identify bugs before the full release! Check out [these docs](https://huggingface.co/docs/bitsandbytes/main/en/non_cuda_backends) for more details and feedback links.
</Tip>
Simply change the snippet above to:
```python
import torch
from transformers import ChameleonForConditionalGeneration, BitsAndBytesConfig
# specify how to quantize the model
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16,
)
model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", quantization_config=quantization_config, device_map="cuda")
```
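The text above notes that the model can also be loaded in 8 bits; a minimal sketch of that variant (not in the original snippet):
```python
from transformers import ChameleonForConditionalGeneration, BitsAndBytesConfig

# 8-bit variant of the 4-bit snippet above
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
model = ChameleonForConditionalGeneration.from_pretrained(
    "facebook/chameleon-7b", quantization_config=quantization_config, device_map="cuda"
)
```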
### Use Flash-Attention 2 and SDPA to further speed up generation
The model supports both Flash-Attention 2 and PyTorch's [`torch.nn.functional.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html), which can be enabled for optimization. SDPA is the default option when you load the model. If you want to switch to Flash Attention 2, first make sure to install flash-attn; refer to the [original repository](https://github.com/Dao-AILab/flash-attention) for installation instructions. Then simply change the snippet above to:
```python
import torch
from transformers import ChameleonForConditionalGeneration
model_id = "facebook/chameleon-7b"
model = ChameleonForConditionalGeneration.from_pretrained(
model_id,
torch_dtype=torch.bfloat16,
low_cpu_mem_usage=True,
attn_implementation="flash_attention_2"
).to(0)
```
## ChameleonConfig
[[autodoc]] ChameleonConfig
## ChameleonVQVAEConfig
[[autodoc]] ChameleonVQVAEConfig
## ChameleonProcessor
[[autodoc]] ChameleonProcessor
## ChameleonImageProcessor
[[autodoc]] ChameleonImageProcessor
- preprocess
## ChameleonVQVAE
[[autodoc]] ChameleonVQVAE
- forward
## ChameleonModel
[[autodoc]] ChameleonModel
- forward
## ChameleonForConditionalGeneration
[[autodoc]] ChameleonForConditionalGeneration
- forward
| |
181939
|
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# GPTQ
<Tip>
Try GPTQ quantization with PEFT in this [notebook](https://colab.research.google.com/drive/1_TIrmuKOFhuRRiTWN94iLKUFu6ZX4ceb?usp=sharing) and learn more about its details in this [blog post](https://huggingface.co/blog/gptq-integration)!
</Tip>
The [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) library implements the GPTQ algorithm, a post-training quantization technique where each row of the weight matrix is quantized independently to find a version of the weights that minimizes the error. These weights are quantized to int4, but they're restored to fp16 on the fly during inference. This can reduce your memory usage by about 4x because the int4 weights are dequantized in a fused kernel rather than in a GPU's global memory, and you can also expect a speedup in inference because using a lower bitwidth takes less time to communicate.
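As a rough sanity check of the ~4x figure, here is a back-of-the-envelope sketch (illustrative only; it ignores the extra quantization scales and zero-points that GPTQ stores):
```py
# rough weight-memory estimate for a model with 125M parameters
num_params = 125_000_000
fp16_bytes = num_params * 2      # 16 bits per weight
int4_bytes = num_params * 0.5    # 4 bits per weight
print(fp16_bytes / int4_bytes)   # 4.0 -> the int4 weights are ~4x smaller
```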
Before you begin, make sure the following libraries are installed:
```bash
pip install auto-gptq
pip install --upgrade accelerate optimum transformers
```
To quantize a model (currently only supported for text models), you need to create a [`GPTQConfig`] class and set the number of bits to quantize to, a dataset to calibrate the weights for quantization, and a tokenizer to prepare the dataset.
```py
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig
model_id = "facebook/opt-125m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
```
You could also pass your own dataset as a list of strings, but it is highly recommended to use the same dataset from the GPTQ paper.
```py
dataset = ["auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."]
gptq_config = GPTQConfig(bits=4, dataset=dataset, tokenizer=tokenizer)
```
Load a model to quantize and pass the `gptq_config` to the [`~AutoModelForCausalLM.from_pretrained`] method. Set `device_map="auto"` to automatically offload the model to a CPU to help fit the model in memory, and allow the model modules to be moved between the CPU and GPU for quantization.
```py
quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=gptq_config)
```
Disk offloading is not supported, so if you're running out of memory because the dataset is too large, try passing the `max_memory` parameter to allocate the amount of memory to use on each device (GPU and CPU):
```py
quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", max_memory={0: "30GiB", 1: "46GiB", "cpu": "30GiB"}, quantization_config=gptq_config)
```
<Tip warning={true}>
Depending on your hardware, it can take some time to quantize a model from scratch. It can take ~5 minutes to quantize the [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) model on a free-tier Google Colab GPU, but it'll take ~4 hours to quantize a 175B parameter model on an NVIDIA A100. Before you quantize a model, it is a good idea to check the Hub to see whether a GPTQ-quantized version of the model already exists.
</Tip>
Once your model is quantized, you can push the model and tokenizer to the Hub where they can be easily shared and accessed. Use the [`~PreTrainedModel.push_to_hub`] method to save the [`GPTQConfig`]:
```py
quantized_model.push_to_hub("opt-125m-gptq")
tokenizer.push_to_hub("opt-125m-gptq")
```
You could also save your quantized model locally with the [`~PreTrainedModel.save_pretrained`] method. If the model was quantized with the `device_map` parameter, make sure to move the entire model to a GPU or CPU before saving it. For example, to save the model on a CPU:
```py
quantized_model.save_pretrained("opt-125m-gptq")
tokenizer.save_pretrained("opt-125m-gptq")
# if quantized with device_map set
quantized_model.to("cpu")
quantized_model.save_pretrained("opt-125m-gptq")
```
Reload a quantized model with the [`~PreTrainedModel.from_pretrained`] method, and set `device_map="auto"` to automatically distribute the model on all available GPUs to load the model faster without using more memory than needed.
```py
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto")
```
## ExLlama
[ExLlama](https://github.com/turboderp/exllama) is a Python/C++/CUDA implementation of the [Llama](model_doc/llama) model that is designed for faster inference with 4-bit GPTQ weights (check out these [benchmarks](https://github.com/huggingface/optimum/tree/main/tests/benchmark#gptq-benchmark)). The ExLlama kernel is activated by default when you create a [`GPTQConfig`] object. To boost inference speed even further, use the [ExLlamaV2](https://github.com/turboderp/exllamav2) kernels by configuring the `exllama_config` parameter:
```py
import torch
from transformers import AutoModelForCausalLM, GPTQConfig
gptq_config = GPTQConfig(bits=4, exllama_config={"version":2})
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config=gptq_config)
```
<Tip warning={true}>
Only 4-bit models are supported, and we recommend deactivating the ExLlama kernels if you're finetuning a quantized model with PEFT.
</Tip>
The ExLlama kernels are only supported when the entire model is on the GPU. If you're doing inference on a CPU with AutoGPTQ (version > 0.4.2), then you'll need to disable the ExLlama kernel. This overwrites the attributes related to the ExLlama kernels in the quantization config of the config.json file.
```py
import torch
from transformers import AutoModelForCausalLM, GPTQConfig
gptq_config = GPTQConfig(bits=4, use_exllama=False)
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="cpu", quantization_config=gptq_config)
```
| |
182030
|
can access the TPU cores at a time. This means that if multiple team members
are trying to connect to the TPU cores, errors such as:
```
libtpu.so already in used by another process. Not attempting to load libtpu.so in this process.
```
are thrown. In conclusion, we recommend that every team member create their own virtual environment, but only one
person should run the heavy training processes. Also, please take turns when setting up the TPUv3-8 so that everybody
can verify that JAX is correctly installed.
The following libraries are required to train a JAX/Flax model with 🤗 Transformers and 🤗 Datasets on TPU VM:
- [JAX](https://github.com/google/jax/)
- [Flax](https://github.com/google/flax)
- [Optax](https://github.com/deepmind/optax)
- [Transformers](https://github.com/huggingface/transformers)
- [Datasets](https://github.com/huggingface/datasets)
You should install the above libraries in a [virtual environment](https://docs.python.org/3/library/venv.html).
If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Create a virtual environment with the version of Python you're going
to use and activate it.
You should be able to run the command:
```bash
python3 -m venv <your-venv-name>
```
If this doesn't work, you might first have to install `python3-venv`. You can do this as follows:
```bash
sudo apt-get install python3-venv
```
You can activate your venv by running
```bash
source ~/<your-venv-name>/bin/activate
```
Next, you should install JAX's TPU version by running the following command:
```bash
$ pip install requests
```
and then:
```bash
$ pip install "jax[tpu]>=0.2.16" -f https://storage.googleapis.com/jax-releases/libtpu_releases.html
```
**Note**: Running this command might actually throw an error, such as:
```
Building wheel for jax (setup.py) ... error
ERROR: Command errored out with exit status 1:
command: /home/patrick/patrick/bin/python3 -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-install-lwseckn1/jax/setup.py'"'"'; __file__='"'"'/tmp/pip-install-lwseckn1/jax/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' bdist_wheel -d /tmp/pip-wheel-pydotzlo
cwd: /tmp/pip-install-lwseckn1/jax/
Complete output (6 lines):
usage: setup.py [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...]
or: setup.py --help [cmd1 cmd2 ...]
or: setup.py --help-commands
or: setup.py cmd --help
error: invalid command 'bdist_wheel'
----------------------------------------
ERROR: Failed building wheel for jax
```
JAX should have been installed correctly nevertheless.
To verify that JAX was correctly installed, you can run the following command:
```python
import jax
jax.device_count()
```
This should display the number of TPU cores, which should be 8 on a TPUv3-8 VM.
We strongly recommend making use of the provided JAX/Flax example scripts in [transformers/examples/flax](https://github.com/huggingface/transformers/tree/main/examples/flax) even if you want to train a JAX/Flax model from another GitHub repository that is not integrated into 🤗 Transformers.
In all likelihood, you will need to adapt one of the example scripts, so we recommend forking and cloning the 🤗 Transformers repository as follows.
Doing so will allow you to share your fork of the Transformers library with your team members so that the team effectively works on the same code base. It will also automatically install the newest versions of `flax`, `jax` and `optax`.
1. Fork the [repository](https://github.com/huggingface/transformers) by
clicking on the 'Fork' button on the repository's page. This creates a copy of the code
under your GitHub user account.
2. Clone your fork to your local disk, and add the base repository as a remote:
```bash
$ git clone https://github.com/<your Github handle>/transformers.git
$ cd transformers
$ git remote add upstream https://github.com/huggingface/transformers.git
```
3. Create a new branch to hold your development changes. This is especially useful to share code changes with your team:
```bash
$ git checkout -b a-descriptive-name-for-my-project
```
4. Set up a flax environment by running the following command in a virtual environment:
```bash
$ pip install -e ".[flax]"
```
(If transformers was already installed in the virtual environment, remove
it with `pip uninstall transformers` before reinstalling it in editable
mode with the `-e` flag.)
If you have already cloned that repo, you might need to `git pull` to get the most recent changes in the `datasets`
library.
Running this command will automatically install `flax`, `jax` and `optax`.
Next, you should also install the 🤗 Datasets library. We strongly recommend installing the
library from source to profit from the most current additions during the community week.
Simply run the following steps:
```bash
$ cd ~/
$ git clone https://github.com/huggingface/datasets.git
$ cd datasets
$ pip install -e ".[streaming]"
```
If you plan on contributing a specific dataset during
the community week, please fork the datasets repository and follow the instructions
[here](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-create-a-pull-request).
To verify that all libraries are correctly installed, you can run the following command.
It assumes that both `transformers` and `datasets` were installed from main - otherwise
datasets streaming will not work correctly.
```python
from transformers import FlaxRobertaModel, RobertaTokenizerFast
from datasets import load_dataset
import jax
dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
dummy_input = next(iter(dataset))["text"]
tokenizer = RobertaTokenizerFast.from_pretrained("FacebookAI/roberta-base")
input_ids = tokenizer(dummy_input, return_tensors="np").input_ids[:, :10]
model = FlaxRobertaModel.from_pretrained("julien-c/dummy-unknown")
# run a forward pass, should return an object `FlaxBaseModelOutputWithPooling`
model(input_ids)
```
## Quickstart flax and jax
[JAX](https://jax.readthedocs.io/en/latest/index.html) is Autograd and XLA, brought together for high-performance numerical computing and machine learning research. It provides composable transformations of Python+NumPy programs: differentiate, vectorize, parallelize, Just-In-Time compile to GPU/TPU, and more. A great place for getting started with JAX is the [JAX 101 Tutorial](https://jax.readthedocs.io/en/latest/jax-101/index.html).
[Flax](https://flax.readthedocs.io/en/latest/index.html) is a high-performance neural network library designed for flexibility built on top of JAX. It aims to provide users with full control of their training code and is carefully designed to work well with JAX transformations such as `grad` and `pmap` (see the [Flax philosophy](https://flax.readthedocs.io/en/latest/philosophy.html)). For an introduction to Flax see the [Flax Basics Colab](https://flax.readthedocs.io/en/latest/notebooks/flax_basics.html) or the list of curated [Flax examples](https://flax.readthedocs.io/en/latest/examples.html).
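As a quick taste of these composable transformations, here is a minimal sketch (not part of the official tutorials) combining `jax.grad` and `jax.jit`:
```python
import jax
import jax.numpy as jnp

def loss(w, x, y):
    # mean squared error of a linear model
    return jnp.mean((x @ w - y) ** 2)

# differentiate with respect to `w`, then JIT-compile the gradient function
grad_fn = jax.jit(jax.grad(loss))

w = jnp.zeros(3)
x = jnp.ones((4, 3))
y = jnp.ones(4)
print(grad_fn(w, x, y))  # gradient of the loss at w
```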
## Quickstart flax and jax in transformers
Currently, we support
| |
182171
|
import gzip
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from huggingface_hub.utils import insecure_hashlib
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")
def get_hash(example):
"""Get hash of content field."""
return {"hash": insecure_hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
"""Calculates mean and max line length of file."""
line_lengths = [len(line) for line in example["content"].splitlines()]
return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
"""Calculates mean and max line length of file."""
alpha_frac = np.mean([c.isalnum() for c in example["content"]])
return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
"""Check if current hash is still in set of unique hashes and remove if true."""
if example["hash"] in uniques:
uniques.remove(example["hash"])
return True
else:
return False
def is_autogenerated(example, scan_width=5):
"""Check if file is autogenerated by looking for keywords in the first few lines of the file."""
keywords = ["auto-generated", "autogenerated", "automatically generated"]
lines = example["content"].splitlines()
for _, line in zip(range(scan_width), lines):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
"""Check if file is a configuration file or a unit test by :
1- looking for keywords in the first few lines of the file.
2- counting number of occurrence of the words 'config' and 'test' with respect to number of lines.
"""
keywords = ["unit tests", "test file", "configuration file"]
lines = example["content"].splitlines()
count_config = 0
count_test = 0
# first test
for _, line in zip(range(scan_width), lines):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
nlines = example["content"].count("\n")
threshold = int(coeff * nlines)
for line in lines:
count_config += line.lower().count("config")
count_test += line.lower().count("test")
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords(example):
"""Check if a python file has none of the keywords for: funcion, class, for loop, while loop."""
keywords = ["def ", "class ", "for ", "while "]
lines = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
"""Check if file uses symbol '=' less than `minimum` times."""
lines = example["content"].splitlines()
counter = 0
for line in lines:
counter += line.lower().count("=")
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio(example):
"""Compute character/token ratio of the file with tokenizer."""
input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
ratio = len(example["content"]) / len(input_ids)
return {"ratio": ratio}
def preprocess(example):
"""Chain all preprocessing steps into one function to not fill cache."""
results = {}
results.update(get_hash(example))
results.update(line_stats(example))
results.update(alpha_stats(example))
results.update(char_token_ratio(example))
results.update(is_autogenerated(example))
results.update(is_config_or_test(example))
results.update(has_no_keywords(example))
results.update(has_few_assignments(example))
return results
def filter(example, uniques, args):
"""Filter dataset with heuristics. Config, test and has_no_keywords files are removed with a given probability."""
if not check_uniques(example, uniques):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
"""Compress a file with g-zip."""
with open(file_path, "rb") as f_in:
with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
shutil.copyfileobj(f_in, f_out)
os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
t_start = time.time()
ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(f"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
file_path = str(data_dir / f"file-{file_number+1:012}.json")
end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| |
183779
|
@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class Text2TextGenerationPipeline(Pipeline):
"""
Pipeline for text to text generation using seq2seq models.
Example:
```python
>>> from transformers import pipeline
>>> generator = pipeline(model="mrm8488/t5-base-finetuned-question-generation-ap")
>>> generator(
... "answer: Manuel context: Manuel has created RuPERTa-base with the support of HF-Transformers and Google"
... )
[{'generated_text': 'question: Who created the RuPERTa-base?'}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text
generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about
text generation parameters in [Text generation strategies](../generation_strategies) and [Text
generation](text_generation).
This Text2TextGenerationPipeline pipeline can currently be loaded from [`pipeline`] using the following task
identifier: `"text2text-generation"`.
The models that this pipeline can use are models that have been fine-tuned on a translation task. See the
up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=text2text-generation). For a list of available
parameters, see the [following
documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)
Usage:
```python
text2text_generator = pipeline("text2text-generation")
text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
```"""
# Used in the return key of the pipeline.
return_name = "generated"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
def _sanitize_parameters(
self,
return_tensors=None,
return_text=None,
return_type=None,
clean_up_tokenization_spaces=None,
truncation=None,
stop_sequence=None,
**generate_kwargs,
):
preprocess_params = {}
if truncation is not None:
preprocess_params["truncation"] = truncation
forward_params = generate_kwargs
postprocess_params = {}
if return_tensors is not None and return_type is None:
return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
postprocess_params["return_type"] = return_type
if clean_up_tokenization_spaces is not None:
postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
if stop_sequence is not None:
stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
if len(stop_sequence_ids) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim."
)
generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def check_inputs(self, input_length: int, min_length: int, max_length: int):
"""
Checks whether there might be something wrong with given input with regard to the model.
"""
return True
def _parse_and_tokenize(self, *args, truncation):
prefix = self.prefix if self.prefix is not None else ""
if isinstance(args[0], list):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
args = ([prefix + arg for arg in args[0]],)
padding = True
elif isinstance(args[0], str):
args = (prefix + args[0],)
padding = False
else:
raise ValueError(
f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
)
inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__(self, *args, **kwargs):
r"""
Generate the output text(s) using text(s) given as inputs.
Args:
args (`str` or `List[str]`):
Input text for the encoder.
return_tensors (`bool`, *optional*, defaults to `False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
return_text (`bool`, *optional*, defaults to `True`):
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the potential extra spaces in the text output.
truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`):
The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE`
(default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's
max_length instead of throwing an error down the line.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate method
corresponding to your framework [here](./text_generation)).
Return:
A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:
- **generated_text** (`str`, present when `return_text=True`) -- The generated text.
- **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
ids of the generated text.
"""
result = super().__call__(*args, **kwargs)
if (
isinstance(args[0], list)
and all(isinstance(el, str) for el in args[0])
and all(len(res) == 1 for res in result)
):
return [res[0] for res in result]
return result
def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
return inputs
def _forward(self, model_inputs, **generate_kwargs):
if self.framework == "pt":
in_b, input_length = model_inputs["input_ids"].shape
elif self.framework == "tf":
in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()
self.check_inputs(
input_length,
generate_kwargs.get("min_length", self.generation_config.min_length),
generate_kwargs.get("max_length", self.generation_config.max_length),
)
# User-defined `generation_config` passed to the pipeline call take precedence
if "generation_config" not in generate_kwargs:
generate_kwargs["generation_config"] = self.generation_config
output_ids = self.model.generate(**model_inputs, **generate_kwargs)
out_b = output_ids.shape[0]
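        # `generate` returns a flat batch of size in_b * num_return_sequences; reshape it to (in_b, num_return_sequences, seq_len)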
if self.framework == "pt":
output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
elif self.framework == "tf":
output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
records = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
record = {f"{self.return_name}_token_ids": output_ids}
elif return_type == ReturnType.TEXT:
record = {
f"{self.return_name}_text": self.tokenizer.decode(
output_ids,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
}
records.append(record)
return records
| |
183794
|
def check_task(task: str) -> Tuple[str, Dict, Any]:
"""
Checks an incoming task string, to validate it's correct and return the default Pipeline and Model classes, and
default models if they exist.
Args:
task (`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- `"audio-classification"`
- `"automatic-speech-recognition"`
- `"conversational"`
- `"depth-estimation"`
- `"document-question-answering"`
- `"feature-extraction"`
- `"fill-mask"`
- `"image-classification"`
- `"image-feature-extraction"`
- `"image-segmentation"`
- `"image-to-text"`
- `"image-to-image"`
- `"object-detection"`
- `"question-answering"`
- `"summarization"`
- `"table-question-answering"`
- `"text2text-generation"`
- `"text-classification"` (alias `"sentiment-analysis"` available)
- `"text-generation"`
- `"text-to-audio"` (alias `"text-to-speech"` available)
- `"token-classification"` (alias `"ner"` available)
- `"translation"`
- `"translation_xx_to_yy"`
- `"video-classification"`
- `"visual-question-answering"` (alias `"vqa"` available)
- `"zero-shot-classification"`
- `"zero-shot-image-classification"`
- `"zero-shot-object-detection"`
Returns:
(normalized_task: `str`, task_defaults: `dict`, task_options: (`tuple`, None)) The normalized task name
(removed alias and options). The actual dictionary required to initialize the pipeline and some extra task
options for parametrized tasks like "translation_XX_to_YY"
"""
return PIPELINE_REGISTRY.check_task(task)
def clean_custom_task(task_info):
import transformers
if "impl" not in task_info:
raise RuntimeError("This model introduces a custom pipeline without specifying its implementation.")
pt_class_names = task_info.get("pt", ())
if isinstance(pt_class_names, str):
pt_class_names = [pt_class_names]
task_info["pt"] = tuple(getattr(transformers, c) for c in pt_class_names)
tf_class_names = task_info.get("tf", ())
if isinstance(tf_class_names, str):
tf_class_names = [tf_class_names]
task_info["tf"] = tuple(getattr(transformers, c) for c in tf_class_names)
return task_info, None
def pipeline(
task: str = None,
model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None,
config: Optional[Union[str, PretrainedConfig]] = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None,
feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None,
image_processor: Optional[Union[str, BaseImageProcessor]] = None,
framework: Optional[str] = None,
revision: Optional[str] = None,
use_fast: bool = True,
token: Optional[Union[str, bool]] = None,
device: Optional[Union[int, str, "torch.device"]] = None,
device_map=None,
torch_dtype=None,
trust_remote_code: Optional[bool] = None,
model_kwargs: Dict[str, Any] = None,
pipeline_class: Optional[Any] = None,
**kwargs,
) -> Pipeline:
"""
| |
183795
|
Utility factory method to build a [`Pipeline`].
Pipelines are made of:
- A [tokenizer](tokenizer) in charge of mapping raw textual input to token.
- A [model](model) to make predictions from the inputs.
- Some (optional) post processing for enhancing model's output.
Args:
task (`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- `"audio-classification"`: will return a [`AudioClassificationPipeline`].
- `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`].
- `"depth-estimation"`: will return a [`DepthEstimationPipeline`].
- `"document-question-answering"`: will return a [`DocumentQuestionAnsweringPipeline`].
- `"feature-extraction"`: will return a [`FeatureExtractionPipeline`].
- `"fill-mask"`: will return a [`FillMaskPipeline`]:.
- `"image-classification"`: will return a [`ImageClassificationPipeline`].
- `"image-feature-extraction"`: will return an [`ImageFeatureExtractionPipeline`].
- `"image-segmentation"`: will return a [`ImageSegmentationPipeline`].
- `"image-to-image"`: will return a [`ImageToImagePipeline`].
- `"image-to-text"`: will return a [`ImageToTextPipeline`].
- `"mask-generation"`: will return a [`MaskGenerationPipeline`].
- `"object-detection"`: will return a [`ObjectDetectionPipeline`].
- `"question-answering"`: will return a [`QuestionAnsweringPipeline`].
- `"summarization"`: will return a [`SummarizationPipeline`].
- `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`].
- `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`].
- `"text-classification"` (alias `"sentiment-analysis"` available): will return a
[`TextClassificationPipeline`].
- `"text-generation"`: will return a [`TextGenerationPipeline`]:.
- `"text-to-audio"` (alias `"text-to-speech"` available): will return a [`TextToAudioPipeline`]:.
- `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`].
- `"translation"`: will return a [`TranslationPipeline`].
- `"translation_xx_to_yy"`: will return a [`TranslationPipeline`].
- `"video-classification"`: will return a [`VideoClassificationPipeline`].
- `"visual-question-answering"`: will return a [`VisualQuestionAnsweringPipeline`].
- `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`].
- `"zero-shot-image-classification"`: will return a [`ZeroShotImageClassificationPipeline`].
- `"zero-shot-audio-classification"`: will return a [`ZeroShotAudioClassificationPipeline`].
- `"zero-shot-object-detection"`: will return a [`ZeroShotObjectDetectionPipeline`].
model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*):
The model that will be used by the pipeline to make predictions. This can be a model identifier or an
actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or
[`TFPreTrainedModel`] (for TensorFlow).
If not provided, the default for the `task` will be loaded.
config (`str` or [`PretrainedConfig`], *optional*):
The configuration that will be used by the pipeline to instantiate the model. This can be a model
identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`].
If not provided, the default configuration file for the requested model will be used. That means that if
`model` is given, its default configuration will be used. However, if `model` is not supplied, this
`task`'s default model's config is used instead.
tokenizer (`str` or [`PreTrainedTokenizer`], *optional*):
The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`].
If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model`
is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string).
However, if `config` is also not given or not a string, then the default tokenizer for the given `task`
will be loaded.
feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*):
The feature extractor that will be used by the pipeline to encode data for the model. This can be a model
identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`].
Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal
models. Multi-modal models will also require a tokenizer to be passed.
If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If
`model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it
is a string). However, if `config` is also not given or not a string, then the default feature extractor
for the given `task` will be loaded.
framework (`str`, *optional*):
The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified and
both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
provided.
revision (`str`, *optional*, defaults to `"main"`):
When passing a task name or a string model identifier: The specific model version to use. It can be a
branch name, a tag name, or a commit id, since we use a git-based system for storing models and other
artifacts on huggingface.co, so `revision` can be any identifier allowed by git.
use_fast (`bool`, *optional*, defaults to `True`):
Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]).
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `huggingface-cli login` (stored in `~/.huggingface`).
device (`int` or `str` or `torch.device`):
Defines the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which this
pipeline will be allocated.
device_map (`str` or `Dict[str, Union[int, str, torch.device]`, *optional*):
Sent directly as `model_kwargs` (just a simpler shortcut). When `accelerate` library is present, set
`device_map="auto"` to compute the most optimized `device_map` automatically (see
[here](https://huggingface.co/docs/accelerate/main/en/package_reference/big_modeling#accelerate.cpu_offload)
for more information).
<Tip warning={true}>
Do not use `device_map` AND `device` at the same time as they will conflict
</Tip>
torch_dtype (`str` or `torch.dtype`, *optional*):
Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model
(`torch.float16`, `torch.bfloat16`, ... or `"auto"`).
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom code defined on the Hub in their own modeling, configuration,
tokenization or even pipeline files. This option should only be set to `True` for repositories you trust
and in which you have read the code, as it will execute code present on the Hub on your local machine.
model_kwargs (`Dict[str, Any]`, *optional*):
Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
**model_kwargs)` function.
kwargs (`Dict[str, Any]`, *optional*):
Additional keyword arguments passed along to the specific pipeline init (see the documentation for the
corresponding pipeline class for possible values).
Returns:
[`Pipeline`]: A suitable pipeline for the task.
Examples:
```python
>>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
>>> # Sentiment analysis pipeline
>>> analyzer = pipeline("sentiment-analysis")
>>> # Question answering pipeline, specifying the checkpoint identifier
>>> oracle = pipeline(
... "question-answering", model="distilbert/distilbert-base-cased-distilled-squad", tokenizer="google-bert/bert-base-cased"
... )
| |
183816
|
@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class TextGenerationPipeline(Pipeline):
"""
Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a
specified text prompt. When the underlying model is a conversational model, it can also accept one or more chats,
in which case the pipeline will operate in chat mode and will continue the chat(s) by adding its response(s).
Each chat takes the form of a list of dicts, where each dict contains "role" and "content" keys.
Examples:
```python
>>> from transformers import pipeline
>>> generator = pipeline(model="openai-community/gpt2")
>>> generator("I can't believe you did such a ", do_sample=False)
[{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}]
>>> # These parameters will return suggestions, and only the newly created text making it easier for prompting suggestions.
>>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False)
```
```python
>>> from transformers import pipeline
>>> generator = pipeline(model="HuggingFaceH4/zephyr-7b-beta")
>>> # Zephyr-beta is a conversational model, so let's pass it a chat instead of a single string
>>> generator([{"role": "user", "content": "What is the capital of France? Answer in one word."}], do_sample=False, max_new_tokens=2)
[{'generated_text': [{'role': 'user', 'content': 'What is the capital of France? Answer in one word.'}, {'role': 'assistant', 'content': 'Paris'}]}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text
generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about
text generation parameters in [Text generation strategies](../generation_strategies) and [Text
generation](text_generation).
This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"text-generation"`.
The models that this pipeline can use are models that have been trained with an autoregressive language modeling
objective. See the list of available [text completion models](https://huggingface.co/models?filter=text-generation)
and the list of [conversational models](https://huggingface.co/models?other=conversational)
on [huggingface.co/models].
"""
# Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
XL_PREFIX = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
prefix = None
if self.prefix is not None:
prefix = self.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
prefix = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
self._preprocess_params = {**self._preprocess_params, **preprocess_params}
self._forward_params = {**self._forward_params, **forward_params}
def _sanitize_parameters(
self,
return_full_text=None,
return_tensors=None,
return_text=None,
return_type=None,
clean_up_tokenization_spaces=None,
prefix=None,
handle_long_generation=None,
stop_sequence=None,
truncation=None,
max_length=None,
continue_final_message=None,
**generate_kwargs,
):
preprocess_params = {}
add_special_tokens = False
if "add_special_tokens" in generate_kwargs:
add_special_tokens = preprocess_params["add_special_tokens"] = generate_kwargs.pop("add_special_tokens")
if "padding" in generate_kwargs:
preprocess_params["padding"] = generate_kwargs.pop("padding")
if truncation is not None:
preprocess_params["truncation"] = truncation
if max_length is not None:
preprocess_params["max_length"] = max_length
generate_kwargs["max_length"] = max_length
if prefix is not None:
preprocess_params["prefix"] = prefix
if prefix:
prefix_inputs = self.tokenizer(
prefix, padding=False, add_special_tokens=add_special_tokens, return_tensors=self.framework
)
generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
" [None, 'hole']"
)
preprocess_params["handle_long_generation"] = handle_long_generation
if continue_final_message is not None:
preprocess_params["continue_final_message"] = continue_final_message
preprocess_params.update(generate_kwargs)
forward_params = generate_kwargs
postprocess_params = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
return_type = ReturnType.TENSORS
if return_type is not None:
postprocess_params["return_type"] = return_type
if clean_up_tokenization_spaces is not None:
postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
if continue_final_message is not None:
postprocess_params["continue_final_message"] = continue_final_message
if stop_sequence is not None:
stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
if len(stop_sequence_ids) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim."
)
generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
# overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
def _parse_and_tokenize(self, *args, **kwargs):
"""
Parse arguments and tokenize
"""
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True})
return super()._parse_and_tokenize(*args, **kwargs)
| |
183817
|
def __call__(self, text_inputs, **kwargs):
"""
Complete the prompt(s) given as inputs.
Args:
text_inputs (`str`, `List[str]`, List[Dict[str, str]], or `List[List[Dict[str, str]]]`):
One or several prompts (or one list of prompts) to complete. If strings or a list of string are
passed, this pipeline will continue each prompt. Alternatively, a "chat", in the form of a list
of dicts with "role" and "content" keys, can be passed, or a list of such chats. When chats are passed,
the model's chat template will be used to format them before passing them to the model.
return_tensors (`bool`, *optional*, defaults to `False`):
Returns the tensors of predictions (as token indices) in the outputs. If set to
`True`, the decoded text is not returned.
return_text (`bool`, *optional*):
Returns the decoded texts in the outputs.
return_full_text (`bool`, *optional*, defaults to `True`):
If set to `False` only added text is returned, otherwise the full text is returned. Cannot be
specified at the same time as `return_text`.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to clean up the potential extra spaces in the text output.
continue_final_message( `bool`, *optional*): This indicates that you want the model to continue the
last message in the input chat rather than starting a new one, allowing you to "prefill" its response.
By default this is `True` when the final message in the input chat has the `assistant` role and
`False` otherwise, but you can manually override that behaviour by setting this flag.
prefix (`str`, *optional*):
Prefix added to prompt.
handle_long_generation (`str`, *optional*):
                By default, this pipeline does not handle long generation (ones that exceed in one form or the other
                the model maximum length). There is no perfect way to address this (more info:
                https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This provides common
strategies to work around that problem depending on your use case.
- `None` : default strategy where nothing in particular happens
- `"hole"`: Truncates left of input, and leaves a gap wide enough to let generation happen (might
truncate a lot of the prompt and not suitable when generation exceed the model capacity)
generate_kwargs (`dict`, *optional*):
Additional keyword arguments to pass along to the generate method of the model (see the generate method
corresponding to your framework [here](./text_generation)).
Return:
A list or a list of lists of `dict`: Returns one of the following dictionaries (cannot return a combination
of both `generated_text` and `generated_token_ids`):
- **generated_text** (`str`, present when `return_text=True`) -- The generated text.
- **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
ids of the generated text.
"""
if isinstance(
text_inputs, (list, tuple, KeyDataset) if is_torch_available() else (list, tuple)
) and isinstance(text_inputs[0], (list, tuple, dict)):
# We have one or more prompts in list-of-dicts format, so this is chat mode
if isinstance(text_inputs[0], dict):
return super().__call__(Chat(text_inputs), **kwargs)
else:
chats = [Chat(chat) for chat in text_inputs] # 🐈 🐈 🐈
return super().__call__(chats, **kwargs)
else:
return super().__call__(text_inputs, **kwargs)
def preprocess(
self,
prompt_text,
prefix="",
handle_long_generation=None,
add_special_tokens=None,
truncation=None,
padding=None,
max_length=None,
continue_final_message=None,
**generate_kwargs,
):
# Only set non-None tokenizer kwargs, so as to rely on the tokenizer's defaults
tokenizer_kwargs = {
"add_special_tokens": add_special_tokens,
"truncation": truncation,
"padding": padding,
"max_length": max_length,
}
tokenizer_kwargs = {key: value for key, value in tokenizer_kwargs.items() if value is not None}
if isinstance(prompt_text, Chat):
tokenizer_kwargs.pop("add_special_tokens", None) # ignore add_special_tokens on chats
# If the user passes a chat that ends in an assistant message, we treat it as a prefill by default
# because very few models support multiple separate, consecutive assistant messages
if continue_final_message is None:
continue_final_message = prompt_text.messages[-1]["role"] == "assistant"
inputs = self.tokenizer.apply_chat_template(
prompt_text.messages,
add_generation_prompt=not continue_final_message,
continue_final_message=continue_final_message,
return_dict=True,
return_tensors=self.framework,
**tokenizer_kwargs,
)
else:
inputs = self.tokenizer(prefix + prompt_text, return_tensors=self.framework, **tokenizer_kwargs)
inputs["prompt_text"] = prompt_text
if handle_long_generation == "hole":
cur_len = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
new_tokens = generate_kwargs["max_new_tokens"]
else:
new_tokens = generate_kwargs.get("max_length", self.generation_config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected")
if cur_len + new_tokens > self.tokenizer.model_max_length:
keep_length = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length"
)
inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
return inputs
def _forward(self, model_inputs, **generate_kwargs):
input_ids = model_inputs["input_ids"]
attention_mask = model_inputs.get("attention_mask", None)
# Allow empty prompts
if input_ids.shape[1] == 0:
input_ids = None
attention_mask = None
in_b = 1
else:
in_b = input_ids.shape[0]
prompt_text = model_inputs.pop("prompt_text")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
prefix_length = generate_kwargs.pop("prefix_length", 0)
if prefix_length > 0:
has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.generation_config.max_length
generate_kwargs["max_length"] += prefix_length
has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# User-defined `generation_config` passed to the pipeline call take precedence
if "generation_config" not in generate_kwargs:
generate_kwargs["generation_config"] = self.generation_config
generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
out_b = generated_sequence.shape[0]
if self.framework == "pt":
generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
elif self.framework == "tf":
generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
| |
183840
|
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
safe_serialization: bool = True,
**kwargs,
):
"""
Save the pipeline's model and tokenizer.
Args:
save_directory (`str` or `os.PathLike`):
                A path to the directory where the pipeline will be saved. It will be created if it doesn't exist.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional way for PyTorch or TensorFlow.
kwargs (`Dict[str, Any]`, *optional*):
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
"""
use_auth_token = kwargs.pop("use_auth_token", None)
if use_auth_token is not None:
warnings.warn(
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
FutureWarning,
)
if kwargs.get("token", None) is not None:
raise ValueError(
"`token` and `use_auth_token` are both specified. Please set only the argument `token`."
)
kwargs["token"] = use_auth_token
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
os.makedirs(save_directory, exist_ok=True)
if hasattr(self, "_registered_impl"):
# Add info to the config
pipeline_info = self._registered_impl.copy()
custom_pipelines = {}
for task, info in pipeline_info.items():
if info["impl"] != self.__class__:
continue
info = info.copy()
module_name = info["impl"].__module__
last_module = module_name.split(".")[-1]
# Change classes into their names/full names
info["impl"] = f"{last_module}.{info['impl'].__name__}"
info["pt"] = tuple(c.__name__ for c in info["pt"])
info["tf"] = tuple(c.__name__ for c in info["tf"])
custom_pipelines[task] = info
self.model.config.custom_pipelines = custom_pipelines
# Save the pipeline custom code
custom_object_save(self, save_directory)
kwargs["safe_serialization"] = safe_serialization
self.model.save_pretrained(save_directory, **kwargs)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(save_directory, **kwargs)
if self.feature_extractor is not None:
self.feature_extractor.save_pretrained(save_directory, **kwargs)
if self.image_processor is not None:
self.image_processor.save_pretrained(save_directory, **kwargs)
if self.modelcard is not None:
self.modelcard.save_pretrained(save_directory)
def transform(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X)
def predict(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X)
@property
def torch_dtype(self) -> Optional["torch.dtype"]:
"""
Torch dtype of the model (if it's Pytorch model), `None` otherwise.
"""
return getattr(self.model, "dtype", None)
@contextmanager
def device_placement(self):
"""
Context Manager allowing tensor allocation on the user-specified device in framework agnostic way.
Returns:
Context manager
Examples:
```python
# Explicitly ask for tensor allocation on CUDA device :0
pipe = pipeline(..., device=0)
with pipe.device_placement():
# Every framework specific tensor allocation will be done on the request device
output = pipe(...)
```"""
if self.framework == "tf":
with tf.device("/CPU:0" if self.device == -1 else f"/device:GPU:{self.device}"):
yield
else:
if self.device.type == "cuda":
with torch.cuda.device(self.device):
yield
elif self.device.type == "mlu":
with torch.mlu.device(self.device):
yield
elif self.device.type == "musa":
with torch.musa.device(self.device):
yield
else:
yield
def ensure_tensor_on_device(self, **inputs):
"""
Ensure PyTorch tensors are on the specified device.
Args:
inputs (keyword arguments that should be `torch.Tensor`, the rest is ignored):
The tensors to place on `self.device`.
Recursive on lists **only**.
Return:
`Dict[str, torch.Tensor]`: The same as `inputs` but on the proper device.
"""
return self._ensure_tensor_on_device(inputs, self.device)
def _ensure_tensor_on_device(self, inputs, device):
if isinstance(inputs, ModelOutput):
return ModelOutput(
{name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}
)
elif isinstance(inputs, dict):
return {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}
elif isinstance(inputs, UserDict):
return UserDict({name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()})
elif isinstance(inputs, list):
return [self._ensure_tensor_on_device(item, device) for item in inputs]
elif isinstance(inputs, tuple):
return tuple([self._ensure_tensor_on_device(item, device) for item in inputs])
elif isinstance(inputs, torch.Tensor):
return inputs.to(device)
else:
return inputs
def check_model_type(self, supported_models: Union[List[str], dict]):
"""
        Check if the model class is supported by the pipeline.
Args:
supported_models (`List[str]` or `dict`):
The list of models supported by the pipeline, or a dictionary with model class values.
"""
if not isinstance(supported_models, list): # Create from a model mapping
supported_models_names = []
for _, model_name in supported_models.items():
# Mapping can now contain tuples of models for the same configuration.
if isinstance(model_name, tuple):
supported_models_names.extend(list(model_name))
else:
supported_models_names.append(model_name)
if hasattr(supported_models, "_model_mapping"):
for _, model in supported_models._model_mapping._extra_content.items():
                    if isinstance(model, tuple):  # check the mapped model itself, not the leftover loop variable
supported_models_names.extend([m.__name__ for m in model])
else:
supported_models_names.append(model.__name__)
supported_models = supported_models_names
if self.model.__class__.__name__ not in supported_models:
logger.error(
f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are"
f" {supported_models}."
)
@abstractmethod
def _sanitize_parameters(self, **pipeline_parameters):
"""
        _sanitize_parameters will be called with any extra named arguments from either the `__init__` or `__call__`
        methods. It should return 3 dictionaries of the resolved parameters used by the various `preprocess`,
        `forward` and `postprocess` methods. Do not fill the dictionaries if the caller didn't specify the
        corresponding kwargs. This lets you keep defaults in function signatures, which is more "natural".
        It is not meant to be called directly; it will be called automatically, and the final parameters resolved, by
        `__init__` and `__call__`.
"""
raise NotImplementedError("_sanitize_parameters not implemented")
@abstractmethod
def preprocess(self, input_: Any, **preprocess_parameters: Dict) -> Dict[str, GenericTensor]:
"""
Preprocess will take the `input_` of a specific pipeline and return a dictionary of everything necessary for
`_forward` to run properly. It should contain at least one tensor, but might have arbitrary other items.
"""
raise NotImplementedError("preprocess not implemented")
@abstractmethod
def _forward(self, input_tensors: Dict[str, GenericTensor], **forward_parameters: Dict) -> ModelOutput:
"""
_forward will receive the prepared dictionary from `preprocess` and run it on the model. This method might
        involve the GPU or the CPU and should be agnostic to it. Isolating this function is the reason `preprocess`
        and `postprocess` exist: it keeps the hot path (this method) as fast as possible.
It is not meant to be called directly, `forward` is preferred. It is basically the same but contains additional
code surrounding `_forward` making sure tensors and models are on the same device, disabling the training part
of the code (leading to faster inference).
"""
raise NotImplementedError("_forward not implemented")
| |
183905
|
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/docs/transformers/installation#install-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(
error_message
+ "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
"versions of HuggingFace Transformers."
)
@lru_cache()
def get_available_devices() -> FrozenSet[str]:
"""
Returns a frozenset of devices available for the current PyTorch installation.
"""
devices = {"cpu"} # `cpu` is always supported as a device in PyTorch
if is_torch_cuda_available():
devices.add("cuda")
if is_torch_mps_available():
devices.add("mps")
if is_torch_xpu_available():
devices.add("xpu")
if is_torch_npu_available():
devices.add("npu")
if is_torch_mlu_available():
devices.add("mlu")
if is_torch_musa_available():
devices.add("musa")
return frozenset(devices)
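# Illustrative sketch of how a caller might consume `get_available_devices`; the
# preference order below is an assumption for the example, not part of the function.
_PREFERRED_DEVICES = ["cuda", "mps", "xpu", "npu", "mlu", "musa", "cpu"]
selected_device = next(d for d in _PREFERRED_DEVICES if d in get_available_devices())
print(f"Selected device: {selected_device}")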
| |
183974
|
@dataclass
class GPTQConfig(QuantizationConfigMixin):
"""
    This is a wrapper class for all the possible attributes and features that you can play with for a model that has
    been loaded using the `optimum` API for GPTQ quantization, relying on the auto_gptq backend.
Args:
bits (`int`):
The number of bits to quantize to, supported numbers are (2, 3, 4, 8).
tokenizer (`str` or `PreTrainedTokenizerBase`, *optional*):
The tokenizer used to process the dataset. You can pass either:
- A custom tokenizer object.
- A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
- A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
        dataset (`Union[List[str], str]`, *optional*):
            The dataset used for quantization. You can provide your own dataset as a list of strings, or just use
            one of the original datasets used in the GPTQ paper: `['wikitext2', 'c4', 'c4-new']`.
group_size (`int`, *optional*, defaults to 128):
The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization.
damp_percent (`float`, *optional*, defaults to 0.1):
The percent of the average Hessian diagonal to use for dampening. Recommended value is 0.1.
desc_act (`bool`, *optional*, defaults to `False`):
Whether to quantize columns in order of decreasing activation size. Setting it to False can significantly
speed up inference but the perplexity may become slightly worse. Also known as act-order.
sym (`bool`, *optional*, defaults to `True`):
            Whether to use symmetric quantization.
true_sequential (`bool`, *optional*, defaults to `True`):
Whether to perform sequential quantization even within a single Transformer block. Instead of quantizing
the entire block at once, we perform layer-wise quantization. As a result, each layer undergoes
quantization using inputs that have passed through the previously quantized layers.
use_cuda_fp16 (`bool`, *optional*, defaults to `False`):
Whether or not to use optimized cuda kernel for fp16 model. Need to have model in fp16.
model_seqlen (`int`, *optional*):
The maximum sequence length that the model can take.
block_name_to_quantize (`str`, *optional*):
The transformers block name to quantize. If None, we will infer the block name using common patterns (e.g. model.layers)
module_name_preceding_first_block (`List[str]`, *optional*):
The layers that are preceding the first Transformer block.
batch_size (`int`, *optional*, defaults to 1):
The batch size used when processing the dataset
pad_token_id (`int`, *optional*):
The pad token id. Needed to prepare the dataset when `batch_size` > 1.
use_exllama (`bool`, *optional*):
Whether to use exllama backend. Defaults to `True` if unset. Only works with `bits` = 4.
max_input_length (`int`, *optional*):
The maximum input length. This is needed to initialize a buffer that depends on the maximum expected input
length. It is specific to the exllama backend with act-order.
exllama_config (`Dict[str, Any]`, *optional*):
The exllama config. You can specify the version of the exllama kernel through the `version` key. Defaults
to `{"version": 1}` if unset.
cache_block_outputs (`bool`, *optional*, defaults to `True`):
Whether to cache block outputs to reuse as inputs for the succeeding block.
modules_in_block_to_quantize (`List[List[str]]`, *optional*):
List of list of module names to quantize in the specified block. This argument is useful to exclude certain linear modules from being quantized.
The block to quantize can be specified by setting `block_name_to_quantize`. We will quantize each list sequentially. If not set, we will quantize all linear layers.
Example: `modules_in_block_to_quantize =[["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"], ["self_attn.o_proj"]]`.
In this example, we will first quantize the q,k,v layers simultaneously since they are independent.
Then, we will quantize `self_attn.o_proj` layer with the q,k,v layers quantized. This way, we will get
better results since it reflects the real input `self_attn.o_proj` will get when the model is quantized.
"""
def __init__(
self,
bits: int,
tokenizer: Any = None,
dataset: Optional[Union[List[str], str]] = None,
group_size: int = 128,
damp_percent: float = 0.1,
desc_act: bool = False,
sym: bool = True,
true_sequential: bool = True,
use_cuda_fp16: bool = False,
model_seqlen: Optional[int] = None,
block_name_to_quantize: Optional[str] = None,
module_name_preceding_first_block: Optional[List[str]] = None,
batch_size: int = 1,
pad_token_id: Optional[int] = None,
use_exllama: Optional[bool] = None,
max_input_length: Optional[int] = None,
exllama_config: Optional[Dict[str, Any]] = None,
cache_block_outputs: bool = True,
modules_in_block_to_quantize: Optional[List[List[str]]] = None,
**kwargs,
):
self.quant_method = QuantizationMethod.GPTQ
self.bits = bits
self.tokenizer = tokenizer
self.dataset = dataset
self.group_size = group_size
self.damp_percent = damp_percent
self.desc_act = desc_act
self.sym = sym
self.true_sequential = true_sequential
self.use_cuda_fp16 = use_cuda_fp16
self.model_seqlen = model_seqlen
self.block_name_to_quantize = block_name_to_quantize
self.module_name_preceding_first_block = module_name_preceding_first_block
self.batch_size = batch_size
self.pad_token_id = pad_token_id
self.use_exllama = use_exllama
self.max_input_length = max_input_length
self.exllama_config = exllama_config
self.disable_exllama = kwargs.pop("disable_exllama", None)
self.cache_block_outputs = cache_block_outputs
self.modules_in_block_to_quantize = modules_in_block_to_quantize
self.post_init()
    def get_loading_attributes(self):
        attributes_dict = copy.deepcopy(self.__dict__)
        loading_attributes = ["disable_exllama", "use_exllama", "exllama_config", "use_cuda_fp16", "max_input_length"]
        loading_attributes_dict = {i: j for i, j in attributes_dict.items() if i in loading_attributes}
        return loading_attributes_dict
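# Illustrative usage sketch for `GPTQConfig` (the model id is an assumption; quantizing
# requires the optimum / auto-gptq backend and a CUDA-capable GPU).
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_id = "facebook/opt-125m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer, group_size=128)
quantized_model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", quantization_config=gptq_config
)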
| |
184701
|
@add_start_docstrings(
"The MARIAN Model with a language modeling head. Can be used for translation.", MARIAN_START_DOCSTRING
)
class FlaxMarianMTModel(FlaxMarianPreTrainedModel):
module_class = FlaxMarianMTModule
dtype: jnp.dtype = jnp.float32
@add_start_docstrings(MARIAN_DECODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=MarianConfig)
def decode(
self,
decoder_input_ids,
encoder_outputs,
encoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
past_key_values: dict = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import AutoTokenizer, FlaxMarianMTModel
>>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=64, return_tensors="jax")
>>> encoder_outputs = model.encode(**inputs)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> logits = outputs.logits
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
if encoder_attention_mask is None:
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
batch_size, sequence_length = decoder_input_ids.shape
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
        # If past_key_values are passed, the cache is already initialized and a private flag `init_cache` has to be
        # passed down to ensure the cache is used. It also has to be made sure that the cache is marked as mutable
        # so that it can be changed by the FlaxMarianAttention module.
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
outputs = decoder_module(
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_embedding = module.model.variables["params"]["shared"]["embedding"]
lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
else:
lm_logits = module.lm_head(hidden_states)
lm_logits += module.final_logits_bias.astype(self.dtype)
return lm_logits, outputs
outputs = self.module.apply(
inputs,
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
mutable=mutable,
method=_decoder_forward,
)
if past_key_values is None:
lm_logits, decoder_outputs = outputs
else:
(lm_logits, decoder_outputs), past = outputs
if return_dict:
outputs = FlaxCausalLMOutputWithCrossAttentions(
logits=lm_logits,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
)
else:
outputs = (lm_logits,) + decoder_outputs[1:]
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs["past_key_values"] = unfreeze(past["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
return outputs
def _adapt_logits_for_beam_search(self, logits):
"""This function enforces the padding token never to be generated."""
logits = logits.at[:, :, self.config.pad_token_id].set(float("-inf"))
return logits
def prepare_inputs_for_generation(
self,
decoder_input_ids,
max_length,
attention_mask: Optional[jax.Array] = None,
decoder_attention_mask: Optional[jax.Array] = None,
encoder_outputs=None,
**kwargs,
):
# initializing the cache
batch_size, seq_length = decoder_input_ids.shape
past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
# Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
# But since the decoder uses a causal mask, those positions are masked anyways.
# Thus we can create a single static attention_mask here, which is more efficient for compilation
extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
if decoder_attention_mask is not None:
position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
else:
position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
return {
"past_key_values": past_key_values,
"encoder_outputs": encoder_outputs,
"encoder_attention_mask": attention_mask,
"decoder_attention_mask": extended_attention_mask,
"decoder_position_ids": position_ids,
}
def update_inputs_for_generation(self, model_outputs, model_kwargs):
model_kwargs["past_key_values"] = model_outputs.past_key_values
model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
return model_kwargs
FLAX_MARIAN_MT_DOCSTRING = """
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, FlaxMarianMTModel
>>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> text = "My friends are cool but they eat too many carbs."
>>> input_ids = tokenizer(text, max_length=64, return_tensors="jax").input_ids
>>> sequences = model.generate(input_ids, max_length=64, num_beams=2).sequences
>>> outputs = tokenizer.batch_decode(sequences, skip_special_tokens=True)
>>> # should give *Meine Freunde sind cool, aber sie essen zu viele Kohlenhydrate.*
```
"""
overwrite_call_docstring(
FlaxMarianMTModel,
MARIAN_INPUTS_DOCSTRING + FLAX_MARIAN_MT_DOCSTRING,
)
append_replace_return_docstrings(FlaxMarianMTModel, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
| |
189980
|
def load_adapter(
self,
peft_model_id: Optional[str] = None,
adapter_name: Optional[str] = None,
revision: Optional[str] = None,
token: Optional[str] = None,
device_map: Optional[str] = "auto",
max_memory: Optional[str] = None,
offload_folder: Optional[str] = None,
offload_index: Optional[int] = None,
peft_config: Dict[str, Any] = None,
adapter_state_dict: Optional[Dict[str, "torch.Tensor"]] = None,
low_cpu_mem_usage: bool = False,
adapter_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
"""
Load adapter weights from file or remote Hub folder. If you are not familiar with adapters and PEFT methods, we
invite you to read more about them on PEFT official documentation: https://huggingface.co/docs/peft
Requires peft as a backend to load the adapter weights.
Args:
peft_model_id (`str`, *optional*):
The identifier of the model to look for on the Hub, or a local path to the saved adapter config file
and adapter weights.
adapter_name (`str`, *optional*):
The adapter name to use. If not set, will use the default adapter.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
<Tip>
To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
</Tip>
            token (`str`, *optional*):
                The token to use for authentication when loading from the remote folder. Useful to load private
                repositories that are on the Hugging Face Hub. You might need to call `huggingface-cli login` and
                paste your token to cache it.
device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*):
A map that specifies where each submodule should go. It doesn't need to be refined to each
parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank
like `1`) on which the model will be allocated, the device map will map the entire model to this
device. Passing `device_map = 0` means put the whole model on GPU 0.
To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
more information about each option see [designing a device
map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
max_memory (`Dict`, *optional*):
A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
GPU and the available CPU RAM if unset.
offload_folder (`str` or `os.PathLike`, `optional`):
If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
offload_index (`int`, `optional`):
`offload_index` argument to be passed to `accelerate.dispatch_model` method.
peft_config (`Dict[str, Any]`, *optional*):
The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompts
methods. This argument is used in case users directly pass PEFT state dicts
adapter_state_dict (`Dict[str, torch.Tensor]`, *optional*):
The state dict of the adapter to load. This argument is used in case users directly pass PEFT state
dicts
low_cpu_mem_usage (`bool`, *optional*, defaults to `False`):
Reduce memory usage while loading the PEFT adapter. This should also speed up the loading process.
Requires PEFT version 0.13.0 or higher.
adapter_kwargs (`Dict[str, Any]`, *optional*):
Additional keyword arguments passed along to the `from_pretrained` method of the adapter config and
`find_adapter_config_file` method.
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
# peft only supports low_cpu_mem_usage starting from v0.13.0
peft_load_kwargs = {}
if low_cpu_mem_usage:
min_version_lcmu = "0.13.0"
if version.parse(importlib.metadata.version("peft")) >= version.parse(min_version_lcmu):
peft_load_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage
else:
raise ValueError(
"The version of PEFT you are using does not support `low_cpu_mem_usage` yet, "
f"please install PEFT >= {min_version_lcmu}."
)
adapter_name = adapter_name if adapter_name is not None else "default"
if adapter_kwargs is None:
adapter_kwargs = {}
from peft import PeftConfig, inject_adapter_in_model, load_peft_weights
from peft.utils import set_peft_model_state_dict
if self._hf_peft_config_loaded and adapter_name in self.peft_config:
raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")
if peft_model_id is None and (adapter_state_dict is None and peft_config is None):
raise ValueError(
"You should either pass a `peft_model_id` or a `peft_config` and `adapter_state_dict` to load an adapter."
)
if "device" not in adapter_kwargs:
device = self.device if not hasattr(self, "hf_device_map") else list(self.hf_device_map.values())[0]
else:
device = adapter_kwargs.pop("device")
# To avoid PEFT errors later on with safetensors.
if isinstance(device, torch.device):
device = str(device)
# We keep `revision` in the signature for backward compatibility
if revision is not None and "revision" not in adapter_kwargs:
adapter_kwargs["revision"] = revision
elif revision is not None and "revision" in adapter_kwargs and revision != adapter_kwargs["revision"]:
logger.error(
"You passed a `revision` argument both in `adapter_kwargs` and as a standalone argument. "
"The one in `adapter_kwargs` will be used."
)
# Override token with adapter_kwargs' token
if "token" in adapter_kwargs:
token = adapter_kwargs.pop("token")
if peft_config is None:
adapter_config_file = find_adapter_config_file(
peft_model_id,
token=token,
**adapter_kwargs,
)
if adapter_config_file is None:
raise ValueError(
f"adapter model file not found in {peft_model_id}. Make sure you are passing the correct path to the "
"adapter model."
)
peft_config = PeftConfig.from_pretrained(
peft_model_id,
token=token,
**adapter_kwargs,
)
# Create and add fresh new adapters into the model.
inject_adapter_in_model(peft_config, self, adapter_name, **peft_load_kwargs)
if not self._hf_peft_config_loaded:
self._hf_peft_config_loaded = True
if peft_model_id is not None:
adapter_state_dict = load_peft_weights(peft_model_id, token=token, device=device, **adapter_kwargs)
# We need to pre-process the state dict to remove unneeded prefixes - for backward compatibility
processed_adapter_state_dict = {}
prefix = "base_model.model."
for key, value in adapter_state_dict.items():
if key.startswith(prefix):
new_key = key[len(prefix) :]
else:
new_key = key
processed_adapter_state_dict[new_key] = value
# Load state dict
incompatible_keys = set_peft_model_state_dict(
self, processed_adapter_state_dict, adapter_name, **peft_load_kwargs
)
if incompatible_keys is not None:
# check only for unexpected keys
if hasattr(incompatible_keys, "unexpected_keys") and len(incompatible_keys.unexpected_keys) > 0:
logger.warning(
f"Loading adapter weights from {peft_model_id} led to unexpected keys not found in the model: "
f" {incompatible_keys.unexpected_keys}. "
)
# Re-dispatch model and hooks in case the model is offloaded to CPU / Disk.
if (
(getattr(self, "hf_device_map", None) is not None)
and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0)
and len(self.peft_config) == 1
):
self._dispatch_accelerate_model(
device_map=device_map,
max_memory=max_memory,
offload_folder=offload_folder,
offload_index=offload_index,
)
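# Illustrative sketch for `load_adapter` (both repo ids are assumptions for the example;
# `peft` must be installed as the backend).
from transformers import AutoModelForCausalLM

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
base_model.load_adapter("ybelkada/opt-350m-lora", adapter_name="my_lora")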
| |
189981
|
def add_adapter(self, adapter_config, adapter_name: Optional[str] = None) -> None:
r"""
If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
official documentation: https://huggingface.co/docs/peft
Adds a fresh new adapter to the current model for training purpose. If no adapter name is passed, a default
name is assigned to the adapter to follow the convention of PEFT library (in PEFT we use "default" as the
default adapter name).
Args:
adapter_config (`~peft.PeftConfig`):
The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompts
methods
adapter_name (`str`, *optional*, defaults to `"default"`):
The name of the adapter to add. If no name is passed, a default name is assigned to the adapter.
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
from peft import PeftConfig, inject_adapter_in_model
adapter_name = adapter_name or "default"
if not self._hf_peft_config_loaded:
self._hf_peft_config_loaded = True
elif adapter_name in self.peft_config:
raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")
if not isinstance(adapter_config, PeftConfig):
raise TypeError(f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead.")
# Retrieve the name or path of the model, one could also use self.config._name_or_path
# but to be consistent with what we do in PEFT: https://github.com/huggingface/peft/blob/6e783780ca9df3a623992cc4d1d665001232eae0/src/peft/mapping.py#L100
adapter_config.base_model_name_or_path = self.__dict__.get("name_or_path", None)
inject_adapter_in_model(adapter_config, self, adapter_name)
self.set_adapter(adapter_name)
def set_adapter(self, adapter_name: Union[List[str], str]) -> None:
"""
If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
official documentation: https://huggingface.co/docs/peft
        Sets a specific adapter by forcing the model to use that adapter and disabling the other adapters.
Args:
adapter_name (`Union[List[str], str]`):
The name of the adapter to set. Can be also a list of strings to set multiple adapters.
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
if not self._hf_peft_config_loaded:
raise ValueError("No adapter loaded. Please load an adapter first.")
elif isinstance(adapter_name, list):
missing = set(adapter_name) - set(self.peft_config)
if len(missing) > 0:
raise ValueError(
f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)."
f" current loaded adapters are: {list(self.peft_config.keys())}"
)
elif adapter_name not in self.peft_config:
raise ValueError(
f"Adapter with name {adapter_name} not found. Please pass the correct adapter name among {list(self.peft_config.keys())}"
)
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import ModulesToSaveWrapper
_adapters_has_been_set = False
for _, module in self.named_modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
                # For backward compatibility with previous PEFT versions
if hasattr(module, "set_adapter"):
module.set_adapter(adapter_name)
else:
module.active_adapter = adapter_name
_adapters_has_been_set = True
if not _adapters_has_been_set:
raise ValueError(
"Did not succeeded in setting the adapter. Please make sure you are using a model that supports adapters."
)
def disable_adapters(self) -> None:
r"""
If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
official documentation: https://huggingface.co/docs/peft
Disable all adapters that are attached to the model. This leads to inferring with the base model only.
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
if not self._hf_peft_config_loaded:
raise ValueError("No adapter loaded. Please load an adapter first.")
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import ModulesToSaveWrapper
for _, module in self.named_modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
# The recent version of PEFT need to call `enable_adapters` instead
if hasattr(module, "enable_adapters"):
module.enable_adapters(enabled=False)
else:
module.disable_adapters = True
def enable_adapters(self) -> None:
"""
If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
official documentation: https://huggingface.co/docs/peft
Enable adapters that are attached to the model. The model will use `self.active_adapter()`
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
if not self._hf_peft_config_loaded:
raise ValueError("No adapter loaded. Please load an adapter first.")
from peft.tuners.tuners_utils import BaseTunerLayer
for _, module in self.named_modules():
if isinstance(module, BaseTunerLayer):
# The recent version of PEFT need to call `enable_adapters` instead
if hasattr(module, "enable_adapters"):
module.enable_adapters(enabled=True)
else:
module.disable_adapters = False
def active_adapters(self) -> List[str]:
"""
If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
official documentation: https://huggingface.co/docs/peft
Gets the current active adapters of the model. In case of multi-adapter inference (combining multiple adapters
for inference) returns the list of all active adapters so that users can deal with them accordingly.
        For previous PEFT versions (that do not support multi-adapter inference), `module.active_adapter` will return
a single string.
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
if not is_peft_available():
raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.")
if not self._hf_peft_config_loaded:
raise ValueError("No adapter loaded. Please load an adapter first.")
from peft.tuners.tuners_utils import BaseTunerLayer
for _, module in self.named_modules():
if isinstance(module, BaseTunerLayer):
active_adapters = module.active_adapter
break
# For previous PEFT versions
if isinstance(active_adapters, str):
active_adapters = [active_adapters]
return active_adapters
def active_adapter(self) -> str:
warnings.warn(
"The `active_adapter` method is deprecated and will be removed in a future version.", FutureWarning
)
return self.active_adapters()[0]
def get_adapter_state_dict(self, adapter_name: Optional[str] = None) -> dict:
"""
If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
official documentation: https://huggingface.co/docs/peft
Gets the adapter state dict that should only contain the weights tensors of the specified adapter_name adapter.
If no adapter_name is passed, the active adapter is used.
Args:
adapter_name (`str`, *optional*):
The name of the adapter to get the state dict from. If no name is passed, the active adapter is used.
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
if not self._hf_peft_config_loaded:
raise ValueError("No adapter loaded. Please load an adapter first.")
from peft import get_peft_model_state_dict
if adapter_name is None:
adapter_name = self.active_adapter()
adapter_state_dict = get_peft_model_state_dict(self, adapter_name=adapter_name)
return adapter_state_dict
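# Illustrative sketch of the adapter management API above: add a fresh LoRA adapter,
# activate it, then fall back to the base model. The model id and target modules are
# assumptions chosen to match OPT-style attention projections.
from peft import LoraConfig
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
lora_config = LoraConfig(r=8, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM")
model.add_adapter(lora_config, adapter_name="lora_1")
model.set_adapter("lora_1")
print(model.active_adapters())                       # ["lora_1"]
print(len(model.get_adapter_state_dict("lora_1")))   # number of LoRA weight tensors
model.disable_adapters()                             # inference falls back to the base weights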
| |
190144
|
def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
"""Validates model kwargs for generation. Generate argument typos will also be caught here."""
# If a `Cache` instance is passed, checks whether the model is compatible with it
if isinstance(model_kwargs.get("past_key_values", None), Cache) and not self._supports_cache_class:
raise ValueError(
f"{self.__class__.__name__} does not support an instance of `Cache` as `past_key_values`. Please "
"check the model documentation for supported cache formats."
)
# Excludes arguments that are handled before calling any model function
if self.config.is_encoder_decoder:
for key in ["decoder_input_ids"]:
model_kwargs.pop(key, None)
unused_model_args = []
model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
# `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
# `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
if "kwargs" in model_args or "model_kwargs" in model_args:
model_args |= set(inspect.signature(self.forward).parameters)
# Encoder-Decoder models may also need Encoder arguments from `model_kwargs`
if self.config.is_encoder_decoder:
base_model = getattr(self, self.base_model_prefix, None)
# allow encoder kwargs
encoder = getattr(self, "encoder", None)
# `MusicgenForConditionalGeneration` has `text_encoder` and `audio_encoder`.
# Also, it has `base_model_prefix = "encoder_decoder"` but there is no `self.encoder_decoder`
# TODO: A better way to handle this.
if encoder is None and base_model is not None:
encoder = getattr(base_model, "encoder", None)
if encoder is not None:
encoder_model_args = set(inspect.signature(encoder.forward).parameters)
model_args |= encoder_model_args
# allow decoder kwargs
decoder = getattr(self, "decoder", None)
if decoder is None and base_model is not None:
decoder = getattr(base_model, "decoder", None)
if decoder is not None:
decoder_model_args = set(inspect.signature(decoder.forward).parameters)
model_args |= {f"decoder_{x}" for x in decoder_model_args}
# allow assistant_encoder_outputs to be passed if we're doing assisted generating
if "assistant_encoder_outputs" in model_kwargs:
model_args |= {"assistant_encoder_outputs"}
for key, value in model_kwargs.items():
if value is not None and key not in model_args:
unused_model_args.append(key)
if unused_model_args:
raise ValueError(
f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
" generate arguments will also show up in this list)"
)
def _validate_generated_length(self, generation_config, input_ids_length, has_default_max_length):
"""Performs validation related to the resulting generated length"""
# Can't throw warnings/exceptions during compilation
if is_torchdynamo_compiling():
return
# 1. Max length warnings related to poor parameterization
if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20:
# 20 is the default max_length of the generation config
warnings.warn(
f"Using the model-agnostic default `max_length` (={generation_config.max_length}) to control the "
"generation length. We recommend setting `max_new_tokens` to control the maximum length of the "
"generation.",
UserWarning,
)
if input_ids_length >= generation_config.max_length:
input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
raise ValueError(
f"Input length of {input_ids_string} is {input_ids_length}, but `max_length` is set to"
f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
" increasing `max_length` or, better yet, setting `max_new_tokens`."
)
# 2. Min length warnings due to unfeasible parameter combinations
min_length_error_suffix = (
" Generation will stop at the defined maximum length. You should decrease the minimum length and/or "
"increase the maximum length."
)
if has_default_max_length:
min_length_error_suffix += (
f" Note that `max_length` is set to {generation_config.max_length}, its default value."
)
if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
warnings.warn(
f"Unfeasible length constraints: `min_length` ({generation_config.min_length}) is larger than"
f" the maximum possible length ({generation_config.max_length})." + min_length_error_suffix,
UserWarning,
)
if generation_config.min_new_tokens is not None:
min_length = generation_config.min_new_tokens + input_ids_length
if min_length > generation_config.max_length:
warnings.warn(
f"Unfeasible length constraints: `min_new_tokens` ({generation_config.min_new_tokens}), when "
f"added to the prompt length ({input_ids_length}), is larger than"
f" the maximum possible length ({generation_config.max_length})." + min_length_error_suffix,
UserWarning,
)
def _prepare_generated_length(
self,
generation_config,
has_default_max_length,
has_default_min_length,
model_input_name,
input_ids_length,
inputs_tensor,
):
"""Prepared max and min length in generaion configs to avoid clashes between similar attributes"""
if generation_config.max_new_tokens is not None:
if not has_default_max_length and generation_config.max_length is not None:
logger.warning(
f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
"Please refer to the documentation for more information. "
"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
)
generation_config.max_length = generation_config.max_new_tokens + input_ids_length
# if both `inputs_embeds` and `input_ids` are passed, we do not correct the length
        # otherwise we need the total length [inputs-embeds-len + new-tokens-len] to not go beyond the indicated `max_length`
elif (
model_input_name == "inputs_embeds"
and input_ids_length != inputs_tensor.shape[1]
and not self.config.is_encoder_decoder
):
generation_config.max_length -= inputs_tensor.shape[1]
# same for min length
if generation_config.min_new_tokens is not None:
if not has_default_min_length:
logger.warning(
f"Both `min_new_tokens` (={generation_config.min_new_tokens}) and `min_length`(="
f"{generation_config.min_length}) seem to have been set. `min_new_tokens` will take precedence. "
"Please refer to the documentation for more information. "
"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
)
generation_config.min_length = generation_config.min_new_tokens + input_ids_length
elif (
model_input_name == "inputs_embeds"
and input_ids_length != inputs_tensor.shape[1]
and not self.config.is_encoder_decoder
):
generation_config.min_length = max(generation_config.min_length - inputs_tensor.shape[1], 0)
return generation_config
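# Illustrative sketch of the length resolution in `_prepare_generated_length`: when both
# `max_length` and `max_new_tokens` are set, `max_new_tokens` wins and `max_length` is
# recomputed as prompt length + max_new_tokens (the model id is an assumption).
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("The quick brown fox", return_tensors="pt")
# Logs the "max_new_tokens will take precedence" warning from the code above.
output_ids = model.generate(**inputs, max_length=200, max_new_tokens=5)
print(output_ids.shape[-1])  # prompt length + at most 5 new tokens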
| |
190387
|
import logging
from abc import ABC
from typing import Awaitable, Callable, List, Optional, Union
from urllib.parse import urljoin
import aiohttp
import tiktoken
from azure.core.credentials import AzureKeyCredential
from azure.core.credentials_async import AsyncTokenCredential
from azure.identity.aio import get_bearer_token_provider
from openai import AsyncAzureOpenAI, AsyncOpenAI, RateLimitError
from tenacity import (
AsyncRetrying,
retry_if_exception_type,
stop_after_attempt,
wait_random_exponential,
)
from typing_extensions import TypedDict
logger = logging.getLogger("scripts")
class EmbeddingBatch:
"""
Represents a batch of text that is going to be embedded
"""
def __init__(self, texts: List[str], token_length: int):
self.texts = texts
self.token_length = token_length
class ExtraArgs(TypedDict, total=False):
dimensions: int
class OpenAIEmbeddings(ABC):
"""
Contains common logic across both OpenAI and Azure OpenAI embedding services
Can split source text into batches for more efficient embedding calls
"""
SUPPORTED_BATCH_AOAI_MODEL = {
"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16},
"text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16},
"text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16},
}
SUPPORTED_DIMENSIONS_MODEL = {
"text-embedding-ada-002": False,
"text-embedding-3-small": True,
"text-embedding-3-large": True,
}
def __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool = False):
self.open_ai_model_name = open_ai_model_name
self.open_ai_dimensions = open_ai_dimensions
self.disable_batch = disable_batch
async def create_client(self) -> AsyncOpenAI:
raise NotImplementedError
def before_retry_sleep(self, retry_state):
logger.info("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
def calculate_token_length(self, text: str):
encoding = tiktoken.encoding_for_model(self.open_ai_model_name)
return len(encoding.encode(text))
def split_text_into_batches(self, texts: List[str]) -> List[EmbeddingBatch]:
batch_info = OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL.get(self.open_ai_model_name)
if not batch_info:
raise NotImplementedError(
f"Model {self.open_ai_model_name} is not supported with batch embedding operations"
)
batch_token_limit = batch_info["token_limit"]
batch_max_size = batch_info["max_batch_size"]
batches: List[EmbeddingBatch] = []
batch: List[str] = []
batch_token_length = 0
for text in texts:
text_token_length = self.calculate_token_length(text)
if batch_token_length + text_token_length >= batch_token_limit and len(batch) > 0:
batches.append(EmbeddingBatch(batch, batch_token_length))
batch = []
batch_token_length = 0
batch.append(text)
batch_token_length = batch_token_length + text_token_length
if len(batch) == batch_max_size:
batches.append(EmbeddingBatch(batch, batch_token_length))
batch = []
batch_token_length = 0
if len(batch) > 0:
batches.append(EmbeddingBatch(batch, batch_token_length))
return batches
async def create_embedding_batch(self, texts: List[str], dimensions_args: ExtraArgs) -> List[List[float]]:
batches = self.split_text_into_batches(texts)
embeddings = []
client = await self.create_client()
for batch in batches:
async for attempt in AsyncRetrying(
retry=retry_if_exception_type(RateLimitError),
wait=wait_random_exponential(min=15, max=60),
stop=stop_after_attempt(15),
before_sleep=self.before_retry_sleep,
):
with attempt:
emb_response = await client.embeddings.create(
model=self.open_ai_model_name, input=batch.texts, **dimensions_args
)
embeddings.extend([data.embedding for data in emb_response.data])
logger.info(
"Computed embeddings in batch. Batch size: %d, Token count: %d",
len(batch.texts),
batch.token_length,
)
return embeddings
async def create_embedding_single(self, text: str, dimensions_args: ExtraArgs) -> List[float]:
client = await self.create_client()
async for attempt in AsyncRetrying(
retry=retry_if_exception_type(RateLimitError),
wait=wait_random_exponential(min=15, max=60),
stop=stop_after_attempt(15),
before_sleep=self.before_retry_sleep,
):
with attempt:
emb_response = await client.embeddings.create(
model=self.open_ai_model_name, input=text, **dimensions_args
)
logger.info("Computed embedding for text section. Character count: %d", len(text))
return emb_response.data[0].embedding
async def create_embeddings(self, texts: List[str]) -> List[List[float]]:
dimensions_args: ExtraArgs = (
{"dimensions": self.open_ai_dimensions}
if OpenAIEmbeddings.SUPPORTED_DIMENSIONS_MODEL.get(self.open_ai_model_name)
else {}
)
if not self.disable_batch and self.open_ai_model_name in OpenAIEmbeddings.SUPPORTED_BATCH_AOAI_MODEL:
return await self.create_embedding_batch(texts, dimensions_args)
return [await self.create_embedding_single(text, dimensions_args) for text in texts]
class AzureOpenAIEmbeddingService(OpenAIEmbeddings):
"""
Class for using Azure OpenAI embeddings
To learn more please visit https://learn.microsoft.com/azure/ai-services/openai/concepts/understand-embeddings
"""
def __init__(
self,
open_ai_service: Union[str, None],
open_ai_deployment: Union[str, None],
open_ai_model_name: str,
open_ai_dimensions: int,
credential: Union[AsyncTokenCredential, AzureKeyCredential],
open_ai_custom_url: Union[str, None] = None,
disable_batch: bool = False,
):
super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)
self.open_ai_service = open_ai_service
if open_ai_service:
self.open_ai_endpoint = f"https://{open_ai_service}.openai.azure.com"
elif open_ai_custom_url:
self.open_ai_endpoint = open_ai_custom_url
else:
raise ValueError("Either open_ai_service or open_ai_custom_url must be provided")
self.open_ai_deployment = open_ai_deployment
self.credential = credential
async def create_client(self) -> AsyncOpenAI:
class AuthArgs(TypedDict, total=False):
api_key: str
azure_ad_token_provider: Callable[[], Union[str, Awaitable[str]]]
auth_args = AuthArgs()
if isinstance(self.credential, AzureKeyCredential):
auth_args["api_key"] = self.credential.key
elif isinstance(self.credential, AsyncTokenCredential):
auth_args["azure_ad_token_provider"] = get_bearer_token_provider(
self.credential, "https://cognitiveservices.azure.com/.default"
)
else:
raise TypeError("Invalid credential type")
return AsyncAzureOpenAI(
azure_endpoint=self.open_ai_endpoint,
azure_deployment=self.open_ai_deployment,
api_version="2023-05-15",
**auth_args,
)
class OpenAIEmbeddingService(OpenAIEmbeddings):
"""
Class for using OpenAI embeddings
To learn more please visit https://platform.openai.com/docs/guides/embeddings
"""
def __init__(
self,
open_ai_model_name: str,
open_ai_dimensions: int,
credential: str,
organization: Optional[str] = None,
disable_batch: bool = False,
):
super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)
self.credential = credential
self.organization = organization
async def create_client(self) -> AsyncOpenAI:
return AsyncOpenAI(api_key=self.credential, organization=self.organization)
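# Illustrative usage sketch for the embedding services above; the API key is a
# placeholder and the call requires network access to the OpenAI API.
import asyncio

async def _demo() -> None:
    service = OpenAIEmbeddingService(
        open_ai_model_name="text-embedding-3-small",
        open_ai_dimensions=1536,
        credential="sk-...",  # placeholder, supply a real API key
    )
    vectors = await service.create_embeddings(["first chunk of text", "second chunk of text"])
    print(len(vectors), len(vectors[0]))

asyncio.run(_demo())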
| |
190392
|
import json
from typing import IO, AsyncGenerator
from .page import Page
from .parser import Parser
class JsonParser(Parser):
"""
Concrete parser that can parse JSON into Page objects. A top-level object becomes a single Page, while a top-level array becomes multiple Page objects.
"""
async def parse(self, content: IO) -> AsyncGenerator[Page, None]:
offset = 0
data = json.loads(content.read())
if isinstance(data, list):
for i, obj in enumerate(data):
offset += 1 # For opening bracket or comma before object
page_text = json.dumps(obj)
yield Page(i, offset, page_text)
offset += len(page_text)
elif isinstance(data, dict):
yield Page(0, 0, json.dumps(data))
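# Illustrative usage sketch for `JsonParser`: a top-level array yields one Page per
# object; offsets are computed from the cumulative serialized length plus one character
# per opening bracket or comma (see the tests in the next snippet).
import asyncio
import io

async def _demo() -> None:
    parser = JsonParser()
    pages = [page async for page in parser.parse(io.StringIO('[{"a": 1},{"b": 2}]'))]
    for page in pages:
        print(page.page_num, page.offset, page.text)

asyncio.run(_demo())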
| |
190409
|
import io
import pytest
from prepdocslib.jsonparser import JsonParser
@pytest.mark.asyncio
async def test_jsonparser_single_obj():
file = io.StringIO('{"test": "test"}')
file.name = "test.json"
jsonparser = JsonParser()
pages = [page async for page in jsonparser.parse(file)]
assert len(pages) == 1
assert pages[0].page_num == 0
assert pages[0].offset == 0
assert pages[0].text == '{"test": "test"}'
@pytest.mark.asyncio
async def test_jsonparser_array_multiple_obj():
file = io.StringIO('[{"test1": "test"},{"test2": "test"}]')
file.name = "test.json"
jsonparser = JsonParser()
pages = [page async for page in jsonparser.parse(file)]
assert len(pages) == 2
assert pages[0].page_num == 0
assert pages[0].offset == 1
assert pages[0].text == '{"test1": "test"}'
assert pages[1].page_num == 1
assert pages[1].offset == 19
assert pages[1].text == '{"test2": "test"}'
| |
190434
|
import io
import openai
import openai.types
import pytest
from azure.core.credentials import AzureKeyCredential
from azure.search.documents.aio import SearchClient
from azure.search.documents.indexes.aio import SearchIndexClient
from azure.search.documents.indexes.models import (
SearchFieldDataType,
SearchIndex,
SimpleField,
)
from openai.types.create_embedding_response import Usage
from prepdocslib.embeddings import AzureOpenAIEmbeddingService
from prepdocslib.listfilestrategy import File
from prepdocslib.searchmanager import SearchManager, Section
from prepdocslib.strategy import SearchInfo
from prepdocslib.textsplitter import SplitPage
from .mocks import (
MOCK_EMBEDDING_DIMENSIONS,
MOCK_EMBEDDING_MODEL_NAME,
MockClient,
MockEmbeddingsClient,
)
@pytest.fixture
def search_info():
return SearchInfo(
endpoint="https://testsearchclient.blob.core.windows.net",
credential=AzureKeyCredential("test"),
index_name="test",
)
@pytest.mark.asyncio
async def test_create_index_doesnt_exist_yet(monkeypatch, search_info):
indexes = []
async def mock_create_index(self, index):
indexes.append(index)
async def mock_list_index_names(self):
for index in []:
yield index
monkeypatch.setattr(SearchIndexClient, "create_index", mock_create_index)
monkeypatch.setattr(SearchIndexClient, "list_index_names", mock_list_index_names)
manager = SearchManager(search_info)
await manager.create_index()
assert len(indexes) == 1, "It should have created one index"
assert indexes[0].name == "test"
assert len(indexes[0].fields) == 7
@pytest.mark.asyncio
async def test_create_index_using_int_vectorization(monkeypatch, search_info):
indexes = []
async def mock_create_index(self, index):
indexes.append(index)
async def mock_list_index_names(self):
for index in []:
yield index
monkeypatch.setattr(SearchIndexClient, "create_index", mock_create_index)
monkeypatch.setattr(SearchIndexClient, "list_index_names", mock_list_index_names)
manager = SearchManager(search_info, use_int_vectorization=True)
await manager.create_index()
assert len(indexes) == 1, "It should have created one index"
assert indexes[0].name == "test"
assert len(indexes[0].fields) == 8
@pytest.mark.asyncio
async def test_create_index_does_exist(monkeypatch, search_info):
created_indexes = []
updated_indexes = []
async def mock_create_index(self, index):
created_indexes.append(index)
async def mock_list_index_names(self):
yield "test"
async def mock_get_index(self, *args, **kwargs):
return SearchIndex(
name="test",
fields=[
SimpleField(
name="storageUrl",
type=SearchFieldDataType.String,
filterable=True,
)
],
)
async def mock_create_or_update_index(self, index, *args, **kwargs):
updated_indexes.append(index)
monkeypatch.setattr(SearchIndexClient, "create_index", mock_create_index)
monkeypatch.setattr(SearchIndexClient, "list_index_names", mock_list_index_names)
monkeypatch.setattr(SearchIndexClient, "get_index", mock_get_index)
monkeypatch.setattr(SearchIndexClient, "create_or_update_index", mock_create_or_update_index)
manager = SearchManager(search_info)
await manager.create_index()
assert len(created_indexes) == 0, "It should not have created a new index"
assert len(updated_indexes) == 0, "It should not have updated the existing index"
@pytest.mark.asyncio
async def test_create_index_add_field(monkeypatch, search_info):
created_indexes = []
updated_indexes = []
async def mock_create_index(self, index):
created_indexes.append(index)
async def mock_list_index_names(self):
yield "test"
async def mock_get_index(self, *args, **kwargs):
return SearchIndex(
name="test",
fields=[],
)
async def mock_create_or_update_index(self, index, *args, **kwargs):
updated_indexes.append(index)
monkeypatch.setattr(SearchIndexClient, "create_index", mock_create_index)
monkeypatch.setattr(SearchIndexClient, "list_index_names", mock_list_index_names)
monkeypatch.setattr(SearchIndexClient, "get_index", mock_get_index)
monkeypatch.setattr(SearchIndexClient, "create_or_update_index", mock_create_or_update_index)
manager = SearchManager(search_info)
await manager.create_index()
assert len(created_indexes) == 0, "It should not have created a new index"
assert len(updated_indexes) == 1, "It should have updated the existing index"
assert len(updated_indexes[0].fields) == 1
assert updated_indexes[0].fields[0].name == "storageUrl"
@pytest.mark.asyncio
async def test_create_index_acls(monkeypatch, search_info):
indexes = []
async def mock_create_index(self, index):
indexes.append(index)
async def mock_list_index_names(self):
for index in []:
yield index
monkeypatch.setattr(SearchIndexClient, "create_index", mock_create_index)
monkeypatch.setattr(SearchIndexClient, "list_index_names", mock_list_index_names)
manager = SearchManager(
search_info,
use_acls=True,
)
await manager.create_index()
assert len(indexes) == 1, "It should have created one index"
assert indexes[0].name == "test"
assert len(indexes[0].fields) == 9
@pytest.mark.asyncio
async def test_update_content(monkeypatch, search_info):
async def mock_upload_documents(self, documents):
assert len(documents) == 1
assert documents[0]["id"] == "file-foo_pdf-666F6F2E706466-page-0"
assert documents[0]["content"] == "test content"
assert documents[0]["category"] == "test"
assert documents[0]["sourcepage"] == "foo.pdf#page=1"
assert documents[0]["sourcefile"] == "foo.pdf"
monkeypatch.setattr(SearchClient, "upload_documents", mock_upload_documents)
manager = SearchManager(search_info)
test_io = io.BytesIO(b"test content")
test_io.name = "test/foo.pdf"
file = File(test_io)
await manager.update_content(
[
Section(
split_page=SplitPage(
page_num=0,
text="test content",
),
content=file,
category="test",
)
]
)
@pytest.mark.asyncio
async def test_update_content_many(monkeypatch, search_info):
ids = []
async def mock_upload_documents(self, documents):
ids.extend([doc["id"] for doc in documents])
monkeypatch.setattr(SearchClient, "upload_documents", mock_upload_documents)
manager = SearchManager(search_info)
# create 1500 sections for 500 pages
sections = []
test_io = io.BytesIO(b"test page")
test_io.name = "test/foo.pdf"
file = File(test_io)
for page_num in range(500):
for page_section_num in range(3):
sections.append(
Section(
split_page=SplitPage(
page_num=page_num,
text=f"test section {page_section_num}",
),
content=file,
category="test",
)
)
await manager.update_content(sections)
assert len(ids) == 1500, "Wrong number of documents uploaded"
assert len(set(ids)) == 1500, "Document ids are not unique"
| |
190648
|
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 225.1/225.1 kB 11.7 MB/s eta 0:00:00
[19:24:01+0000] Downloading PyJWT-2.8.0-py3-none-any.whl (22 kB)
[19:24:07+0000] Installing collected packages: pytz, fixedint, azure-common, zipp, wrapt, urllib3, tzdata, typing-extensions, types-pytz, types-pillow, tqdm, tenacity, sniffio, six, regex, pyjwt, pycparser, pyasn1, priority, portalocker, pillow, packaging, opentelemetry-util-http, opentelemetry-semantic-conventions, oauthlib, numpy, multidict, markupsafe, itsdangerous, idna, hyperframe, hpack, h11, frozenlist, distro, click, charset-normalizer, certifi, blinker, attrs, asgiref, annotated-types, aiofiles, yarl, wsproto, werkzeug, uvicorn, rsa, requests, python-dateutil, pydantic-core, pandas-stubs, jinja2, isodate, importlib-metadata, httpcore, h2, ecdsa, deprecated, cffi, anyio, aiosignal, tiktoken, requests-oauthlib, python-jose, pydantic, pandas, opentelemetry-api, hypercorn, httpx, flask, cryptography, azure-core, aiohttp, quart, opentelemetry-sdk, opentelemetry-instrumentation, openai, msrest, azure-storage-blob, azure-search-documents, azure-keyvault-secrets, azure-core-tracing-opentelemetry, quart-cors, opentelemetry-resource-detector-azure, opentelemetry-instrumentation-wsgi, opentelemetry-instrumentation-urllib3, opentelemetry-instrumentation-urllib, opentelemetry-instrumentation-requests, opentelemetry-instrumentation-httpx, opentelemetry-instrumentation-dbapi, opentelemetry-instrumentation-asgi, opentelemetry-instrumentation-aiohttp-client, msal, azure-monitor-opentelemetry-exporter, opentelemetry-instrumentation-psycopg2, opentelemetry-instrumentation-flask, opentelemetry-instrumentation-fastapi, opentelemetry-instrumentation-django, msal-extensions, azure-monitor-opentelemetry, azure-identity
[19:25:31+0000] Successfully installed aiofiles-23.2.1 aiohttp-3.9.3 aiosignal-1.3.1 annotated-types-0.6.0 anyio-4.2.0 asgiref-3.7.2 attrs-23.2.0 azure-common-1.1.28 azure-core-1.29.7 azure-core-tracing-opentelemetry-1.0.0b11 azure-identity-1.15.0 azure-keyvault-secrets-4.7.0 azure-monitor-opentelemetry-1.2.0 azure-monitor-opentelemetry-exporter-1.0.0b21 azure-search-documents-11.6.0b1 azure-storage-blob-12.19.0 blinker-1.7.0 certifi-2023.11.17 cffi-1.16.0 charset-normalizer-3.3.2 click-8.1.7 cryptography-42.0.1 deprecated-1.2.14 distro-1.9.0 ecdsa-0.18.0 fixedint-0.1.6 flask-3.0.1 frozenlist-1.4.1 h11-0.14.0 h2-4.1.0 hpack-4.0.0 httpcore-1.0.2 httpx-0.26.0 hypercorn-0.16.0 hyperframe-6.0.1 idna-3.6 importlib-metadata-6.11.0 isodate-0.6.1 itsdangerous-2.1.2 jinja2-3.1.3 markupsafe-2.1.4 msal-1.26.0 msal-extensions-1.1.0 msrest-0.7.1 multidict-6.0.4 numpy-1.26.3 oauthlib-3.2.2 openai-1.10.0 opentelemetry-api-1.22.0 opentelemetry-instrumentation-0.43b0 opentelemetry-instrumentation-aiohttp-client-0.43b0 opentelemetry-instrumentation-asgi-0.43b0 opentelemetry-instrumentation-dbapi-0.43b0 opentelemetry-instrumentation-django-0.43b0 opentelemetry-instrumentation-fastapi-0.43b0 opentelemetry-instrumentation-flask-0.43b0 opentelemetry-instrumentation-httpx-0.43b0 opentelemetry-instrumentation-psycopg2-0.43b0 opentelemetry-instrumentation-requests-0.43b0 opentelemetry-instrumentation-urllib-0.43b0 opentelemetry-instrumentation-urllib3-0.43b0 opentelemetry-instrumentation-wsgi-0.43b0 opentelemetry-resource-detector-azure-0.1.3 opentelemetry-sdk-1.22.0 opentelemetry-semantic-conventions-0.43b0 opentelemetry-util-http-0.43b0 packaging-23.2 pandas-2.2.0 pandas-stubs-2.1.4.231227 pillow-10.2.0 portalocker-2.8.2 priority-2.0.0 pyasn1-0.5.1 pycparser-2.21 pydantic-2.6.0 pydantic-core-2.16.1 pyjwt-2.8.0 python-dateutil-2.8.2 python-jose-3.3.0 pytz-2023.4 quart-0.19.4 quart-cors-0.7.0 regex-2023.12.25 requests-2.31.0 requests-oauthlib-1.3.1 rsa-4.9 six-1.16.0 sniffio-1.3.0 tenacity-8.2.3 tiktoken-0.5.2 tqdm-4.66.1 types-pillow-10.2.0.20240206 types-pytz-2023.4.0.20240130 typing-extensions-4.9.0 tzdata-2023.4 urllib3-2.1.0 uvicorn-0.27.0.post1 werkzeug-3.0.1 wrapt-1.16.0 wsproto-1.2.0 yarl-1.9.4 zipp-3.17.0
[notice] A new release of pip is available: 23.2.1 -> 24.0
[notice] To update, run: pip install --upgrade pip
Not a vso image, so not writing build commands
Preparing output...
Copying files to destination directory '/tmp/_preCompressedDestinationDir'...
Done in 48 sec(s).
Compressing content of directory '/tmp/_preCompressedDestinationDir'...
Copied the compressed output to '/home/site/wwwroot'
Removing existing manifest file
Creating a manifest file...
Manifest file created.
Copying .ostype to manifest output directory.
Done in 522 sec(s).
```
</details>
Look for these important steps in the Oryx build:
- _Detected following platforms: python: 3.11.7_
That should match your runtime in the App Service configuration.
- _Running pip install..._
That should install all the requirements in your requirements.txt - if it didn't find your requirements.txt, then you won't see the packages installed.
If you see all those steps in the Oryx build, then that's a good sign that the build went well, and you can move on to checking the App Service logs.
## Checking the app logs for errors
Select _Advanced Tools_ from the side nav:

Select _Go_ to open the Kudu website.
When the Kudu website loads, find the _Current Docker Logs_ link and select _Download as zip_ next to it:

In the downloaded zip file, find the filename that starts with the most recent date and ends with "_default_docker.log":

Open that file to see the full logs, with the most recent logs at the bottom.
<details>
<summary>Here are the full logs for the app successfully starting:</summary>
```plaintext
2024-02-08T19:30:27.900249002Z _____
2024-02-08T19:30:27.900282702Z / _ \ __________ _________ ____
2024-02-08T19:30:27.90
| |
190651
|
# Local development of Chat App
You can only run locally **after** having successfully run the `azd up` command. If you haven't yet, follow the steps in [Azure deployment](../README.md#azure-deployment) first.
1. Run `azd auth login`
2. Change dir to `app`
3. Run `./start.ps1` or `./start.sh` or run the "VS Code Task: Start App" to start the project locally.
## Hot reloading frontend and backend files
When you run `./start.ps1` or `./start.sh`, the backend files will be watched and reloaded automatically. However, the frontend files will not be watched and reloaded automatically.
To enable hot reloading of frontend files, open a new terminal and navigate to the frontend directory:
```shell
cd app/frontend
```
Then run:
```shell
npm run dev
```
You should see:
```shell
> frontend@0.0.0 dev
> vite
VITE v4.5.1 ready in 957 ms
➜ Local: http://localhost:5173/
➜ Network: use --host to expose
➜ press h to show help
```
Navigate to the URL shown in the terminal (in this case, `http://localhost:5173/`). This local server will watch and reload frontend files. All backend requests will be routed to the Python server according to `vite.config.ts`.
Then, whenever you make changes to frontend files, the changes will be automatically reloaded, without any browser refresh needed.
## Using a local OpenAI-compatible API
You may want to save costs by developing against a local LLM server, such as
[llamafile](https://github.com/Mozilla-Ocho/llamafile/). Note that a local LLM
will generally be slower and not as sophisticated.
Once the local LLM server is running and serving an OpenAI-compatible endpoint, set these environment variables:
```shell
azd env set USE_VECTORS false
azd env set OPENAI_HOST local
azd env set OPENAI_BASE_URL <your local endpoint>
azd env set AZURE_OPENAI_CHATGPT_MODEL local-model-name
```
Then restart the local development server.
You should now be able to use the "Ask" tab.
⚠️ Limitations:
- The "Chat" tab will only work if the local language model supports function calling.
- Your search mode must be text only (no vectors), since the search index is only populated with OpenAI-generated embeddings, and the local OpenAI host can't generate those.
- The conversation history will be truncated using the GPT tokenizers, which may not be the same as the local model's tokenizer, so if you have a long conversation, you may end up with token limit errors.
> [!NOTE]
> You must set `OPENAI_HOST` back to a non-local value ("azure", "azure_custom", or "openai")
> before running `azd up` or `azd provision`, since the deployed backend can't access your local server.
### Using Ollama server
For example, to point at a local Ollama server running the `llama3.1:8b` model:
```shell
azd env set OPENAI_HOST local
azd env set OPENAI_BASE_URL http://localhost:11434/v1
azd env set AZURE_OPENAI_CHATGPT_MODEL llama3.1:8b
azd env set USE_VECTORS false
```
If you're running the app inside a VS Code Dev Container, use this local URL instead:
```shell
azd env set OPENAI_BASE_URL http://host.docker.internal:11434/v1
```
### Using llamafile server
To point at a local llamafile server running on its default port:
```shell
azd env set OPENAI_HOST local
azd env set OPENAI_BASE_URL http://localhost:8080/v1
azd env set USE_VECTORS false
```
Llamafile does *not* require a model name to be specified.
If you're running the app inside a VS Code Dev Container, use this local URL instead:
```shell
azd env set OPENAI_BASE_URL http://host.docker.internal:8080/v1
```
| |
190657
|
# Productionizing the Chat App
This sample is designed to be a starting point for your own production application,
but you should do a thorough review of the security and performance before deploying
to production. Here are some things to consider:
* [Azure resource configuration](#azure-resource-configuration)
* [Additional security measures](#additional-security-measures)
* [Load testing](#load-testing)
* [Evaluation](#evaluation)
## Azure resource configuration
### OpenAI Capacity
The default TPM (tokens per minute) is set to 30K. That is equivalent
to approximately 30 conversations per minute (assuming 1K tokens per user message/response).
You can increase the capacity by changing the `chatGptDeploymentCapacity` and `embeddingDeploymentCapacity`
parameters in `infra/main.bicep` to your account's maximum capacity.
You can also view the Quotas tab in [Azure OpenAI studio](https://oai.azure.com/)
to understand how much capacity you have.
If the maximum TPM isn't enough for your expected load, you have a few options:
* Use a backoff mechanism to retry the request. This is helpful if you're running into a short-term quota due to bursts of activity but aren't over long-term quota. The [tenacity](https://tenacity.readthedocs.io/en/latest/) library is a good option for this (a minimal sketch follows this list), and this [pull request](https://github.com/Azure-Samples/azure-search-openai-demo/pull/500) shows how to apply it to this app.
* If you are consistently going over the TPM, then consider implementing a load balancer between OpenAI instances. Most developers implement that using Azure API Management or container-based load balancers. A native Python approach that integrates with the OpenAI Python API Library is also possible. For integration instructions with this sample, please check:
* [Scale Azure OpenAI for Python with Azure API Management](https://learn.microsoft.com/azure/developer/python/get-started-app-chat-scaling-with-azure-api-management)
* [Scale Azure OpenAI for Python chat using RAG with Azure Container Apps](https://learn.microsoft.com/azure/developer/python/get-started-app-chat-scaling-with-azure-container-apps)
* [Pull request: Scale Azure OpenAI for Python with the Python openai-priority-loadbalancer](https://github.com/Azure-Samples/azure-search-openai-demo/pull/1626)
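As mentioned above, here is a minimal backoff sketch using tenacity. It is illustrative only: it assumes the openai Python SDK v1 and uses a placeholder deployment name ("chat"); see the linked pull request for how retries are actually wired into this app.

```python
from openai import AsyncAzureOpenAI, RateLimitError
from tenacity import (
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_random_exponential,
)


@retry(
    retry=retry_if_exception_type(RateLimitError),  # only retry on 429 responses
    wait=wait_random_exponential(min=15, max=60),   # randomized exponential backoff
    stop=stop_after_attempt(5),                     # give up after 5 attempts
)
async def chat_with_retries(client: AsyncAzureOpenAI, messages: list):
    # "chat" is a placeholder for your Azure OpenAI deployment name.
    return await client.chat.completions.create(model="chat", messages=messages)
```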
### Azure Storage
The default storage account uses the `Standard_LRS` SKU.
To improve your resiliency, we recommend using `Standard_ZRS` for production deployments,
which you can specify using the `sku` property under the `storage` module in `infra/main.bicep`.
### Azure AI Search
The default search service uses the "Basic" SKU
with the free semantic ranker option, which gives you 1000 free queries a month.
After 1000 queries, you will get an error message about exceeding the semantic ranker free capacity.
* Assuming your app will experience more than 1000 questions per month,
you should upgrade the semantic ranker from the "free" to the "standard" SKU:
```shell
azd env set AZURE_SEARCH_SEMANTIC_RANKER standard
```
Or disable semantic search entirely:
```shell
azd env set AZURE_SEARCH_SEMANTIC_RANKER disabled
```
* The search service can handle fairly large indexes, but it does have per-SKU limits on storage sizes, maximum vector dimensions, etc. You may want to upgrade the SKU to either a Standard or Storage Optimized SKU, depending on your expected load.
However, you [cannot change the SKU](https://learn.microsoft.com/azure/search/search-sku-tier#tier-upgrade-or-downgrade) of an existing search service, so you will need to re-index the data or manually copy it over.
You can change the SKU by setting the `AZURE_SEARCH_SERVICE_SKU` azd environment variable to [an allowed SKU](https://learn.microsoft.com/azure/templates/microsoft.search/searchservices?pivots=deployment-language-bicep#sku).
```shell
azd env set AZURE_SEARCH_SERVICE_SKU standard
```
See the [Azure AI Search service limits documentation](https://learn.microsoft.com/azure/search/search-limits-quotas-capacity) for more details.
* If you see errors about search service capacity being exceeded, you may find it helpful to increase
the number of replicas by changing `replicaCount` in `infra/core/search/search-services.bicep`
or manually scaling it from the Azure Portal.
### Azure App Service
The default app service plan uses the `Basic` SKU with 1 CPU core and 1.75 GB RAM.
We recommend using a Premium level SKU, starting with 1 CPU core.
You can use auto-scaling rules or scheduled scaling rules,
and scale up the maximum/minimum based on load.
## Additional security measures
* **Authentication**: By default, the deployed app is publicly accessible.
We recommend restricting access to authenticated users.
See [Enabling authentication](./deploy_features.md#enabling-authentication) to learn how to enable authentication.
* **Networking**: We recommend [deploying inside a Virtual Network](./deploy_private.md). If the app is only for
internal enterprise use, use a private DNS zone. Also consider using Azure API Management (APIM)
for firewalls and other forms of protection.
For more details, read [Azure OpenAI Landing Zone reference architecture](https://techcommunity.microsoft.com/t5/azure-architecture-blog/azure-openai-landing-zone-reference-architecture/ba-p/3882102).
## Load testing
We recommend running a loadtest for your expected number of users.
You can use the [locust tool](https://docs.locust.io/) with the `locustfile.py` in this sample
or set up a loadtest with Azure Load Testing.
To use locust, first install the dev requirements that includes locust:
```shell
python -m pip install -r requirements-dev.txt
```
Or manually install locust:
```shell
python -m pip install locust
```
Then run the locust command, specifying the name of the User class to use from `locustfile.py`. We've provided a `ChatUser` class that simulates a user asking questions and receiving answers, as well as a `ChatVisionUser` to simulate a user asking questions with the [GPT-4 vision mode enabled](/docs/gpt4v.md).
```shell
locust ChatUser
```
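If you'd rather start from scratch than use the provided classes, a minimal user class might look like the sketch below. The `/chat` path and request body are assumptions modeled on this sample's API, so check `locustfile.py` for the actual implementation.

```python
from locust import HttpUser, between, task


class SimpleChatUser(HttpUser):
    """Hypothetical user that asks a single question per task."""

    wait_time = between(5, 20)  # seconds to wait between simulated questions

    @task
    def ask_question(self) -> None:
        # Assumed endpoint and payload shape -- adjust to match your backend.
        self.client.post(
            "/chat",
            json={
                "messages": [{"role": "user", "content": "What does a Product Manager do?"}],
                "context": {"overrides": {"retrieval_mode": "hybrid"}},
            },
        )
```

You would then run it the same way, passing the class name (here `SimpleChatUser`) to the `locust` command.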
Open the locust UI at [http://localhost:8089/](http://localhost:8089/), the URI displayed in the terminal.
Start a new test with the URI of your website, e.g. `https://my-chat-app.azurewebsites.net`.
Do *not* end the URI with a slash. You can start by pointing at your localhost if you're concerned
more about load on OpenAI/AI Search than the host platform.
For the number of users and spawn rate, we recommend starting with 20 users and a spawn rate of 1 user per second.
From there, you can keep increasing the number of users to simulate your expected load.
Here's an example loadtest for 50 users and a spawn rate of 1 per second:

After each test, check the local or App Service logs to see if there are any errors.
## Evaluation
Before you make your chat app available to users, you'll want to rigorously evaluate the answer quality. You can use tools in [the AI RAG Chat evaluator](https://github.com/Azure-Samples/ai-rag-chat-evaluator) repository to run evaluations, review results, and compare answers across runs.
| |
190832
|
import { NextRequest, NextResponse } from "next/server";
import { z } from "zod";
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
export const runtime = "edge";
const TEMPLATE = `Extract the requested fields from the input.
The field "entity" refers to the first mentioned entity in the input.
Input:
{input}`;
/**
* This handler initializes and calls an OpenAI Functions powered
* structured output chain. See the docs for more information:
*
* https://js.langchain.com/v0.2/docs/how_to/structured_output
*/
export async function POST(req: NextRequest) {
try {
const body = await req.json();
const messages = body.messages ?? [];
const currentMessageContent = messages[messages.length - 1].content;
const prompt = PromptTemplate.fromTemplate(TEMPLATE);
/**
* Function calling is currently only supported with ChatOpenAI models
*/
const model = new ChatOpenAI({
temperature: 0.8,
model: "gpt-3.5-turbo-0125",
});
/**
* We use Zod (https://zod.dev) to define our schema for convenience,
* but you can pass JSON schema if desired.
*/
const schema = z
.object({
tone: z
.enum(["positive", "negative", "neutral"])
.describe("The overall tone of the input"),
entity: z.string().describe("The entity mentioned in the input"),
word_count: z.number().describe("The number of words in the input"),
chat_response: z.string().describe("A response to the human's input"),
final_punctuation: z
.optional(z.string())
.describe("The final punctuation mark in the input, if any."),
})
.describe("Should always be used to properly format output");
/**
* Bind schema to the OpenAI model.
* Future invocations of the returned model will always match the schema.
*
* Under the hood, uses tool calling by default.
*/
const functionCallingModel = model.withStructuredOutput(schema, {
name: "output_formatter",
});
/**
* Returns a chain with the function calling model.
*/
const chain = prompt.pipe(functionCallingModel);
const result = await chain.invoke({
input: currentMessageContent,
});
return NextResponse.json(result, { status: 200 });
} catch (e: any) {
return NextResponse.json({ error: e.message }, { status: e.status ?? 500 });
}
}
| |
190850
|
`# QA and Chat over Documents
Chat and Question-Answering (QA) over \`data\` are popular LLM use-cases.
\`data\` can include many things, including:
* \`Unstructured data\` (e.g., PDFs)
* \`Structured data\` (e.g., SQL)
* \`Code\` (e.g., Python)
Below we will review Chat and QA on \`Unstructured data\`.

\`Unstructured data\` can be loaded from many sources.
Check out the [document loader integrations here](/docs/modules/data_connection/document_loaders/) to browse the set of supported loaders.
Each loader returns data as a LangChain \`Document\`.
\`Documents\` are turned into a Chat or QA app following the general steps below:
* \`Splitting\`: [Text splitters](/docs/modules/data_connection/document_transformers/) break \`Documents\` into splits of specified size
* \`Storage\`: Storage (e.g., often a [vectorstore](/docs/modules/data_connection/vectorstores/)) will house [and often embed](https://www.pinecone.io/learn/vector-embeddings/) the splits
* \`Retrieval\`: The app retrieves splits from storage (e.g., often [with similar embeddings](https://www.pinecone.io/learn/k-nearest-neighbor/) to the input question)
* \`Output\`: An [LLM](/docs/modules/model_io/models/llms/) produces an answer using a prompt that includes the question and the retrieved splits

## Quickstart
Let's load this [blog post](https://lilianweng.github.io/posts/2023-06-23-agent/) on agents as an example \`Document\`.
We'll have a QA app in a few lines of code.
First, set environment variables and install packages required for the guide:
\`\`\`shell
> yarn add cheerio
# Or load env vars in your preferred way:
> export OPENAI_API_KEY="..."
\`\`\`
## 1. Loading, Splitting, Storage
### 1.1 Getting started
Specify a \`Document\` loader.
\`\`\`typescript
// Document loader
import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio";
const loader = new CheerioWebBaseLoader(
"https://lilianweng.github.io/posts/2023-06-23-agent/"
);
const data = await loader.load();
\`\`\`
Split the \`Document\` into chunks for embedding and vector storage.
\`\`\`typescript
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
const textSplitter = new RecursiveCharacterTextSplitter({
chunkSize: 500,
chunkOverlap: 0,
});
const splitDocs = await textSplitter.splitDocuments(data);
\`\`\`
Embed and store the splits in a vector database (for demo purposes we use an unoptimized, in-memory example but you can [browse integrations here](/docs/modules/data_connection/vectorstores/integrations/)):
\`\`\`typescript
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
const embeddings = new OpenAIEmbeddings();
const vectorStore = await MemoryVectorStore.fromDocuments(splitDocs, embeddings);
\`\`\`
Here are the three pieces together:

### 1.2 Going Deeper
#### 1.2.1 Integrations
\`Document Loaders\`
* Browse document loader integrations [here](/docs/modules/data_connection/document_loaders/).
* See further documentation on loaders [here](/docs/modules/data_connection/document_loaders/).
\`Document Transformers\`
* All can ingest loaded \`Documents\` and process them (e.g., split).
* See further documentation on transformers [here](/docs/modules/data_connection/document_transformers/).
\`Vectorstores\`
* Browse vectorstore integrations [here](/docs/modules/data_connection/vectorstores/integrations/).
* See further documentation on vectorstores [here](/docs/modules/data_connection/vectorstores/).
## 2. Retrieval
### 2.1 Getting started
Retrieve [relevant splits](https://www.pinecone.io/learn/what-is-similarity-search/) for any question using \`similarity_search\`.
\`\`\`typescript
const relevantDocs = await vectorStore.similaritySearch("What is task decomposition?");
console.log(relevantDocs.length);
// 4
\`\`\`
### 2.2 Going Deeper
#### 2.2.1 Retrieval
Vectorstores are commonly used for retrieval.
But, they are not the only option.
For example, SVMs (see thread [here](https://twitter.com/karpathy/status/1647025230546886658?s=20)) can also be used.
LangChain [has many retrievers and retrieval methods](/docs/modules/data_connection/retrievers/) including, but not limited to, vectorstores.
All retrievers implement some common methods, such as \`getRelevantDocuments()\`.
## 3. QA
### 3.1 Getting started
Distill the retrieved documents into an answer using an LLM (e.g., \`gpt-3.5-turbo\`) with the \`RetrievalQAChain\`.
\`\`\`typescript
import { RetrievalQAChain } from "langchain/chains";
import { ChatOpenAI } from "langchain/chat_models/openai";
const model = new ChatOpenAI({ model: "gpt-3.5-turbo" });
const chain = RetrievalQAChain.fromLLM(model, vectorStore.asRetriever());
const response = await chain.call({
query: "What is task decomposition?"
});
console.log(response);
/*
{
text: 'Task decomposition refers to the process of breaking down a larger task into smaller, more manageable subgoals. By decomposing a task, it becomes easier for an agent or system to handle complex tasks efficiently. Task decomposition can be done through various methods such as using prompting or task-specific instructions, or through human inputs. It helps in planning and organizing the steps required to complete a task effectively.'
}
*/
\`\`\`
### 3.2 Going Deeper
#### 3.2.1 Integrations
\`LLMs\`
* Browse LLM integrations and further documentation [here](/docs/modules/model_io/models/).
#### 3.2.2 Customizing the prompt
The prompt in the \`RetrievalQAChain\` can be customized as follows.
\`\`\`typescript
import { RetrievalQAChain } from "langchain/chains";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { PromptTemplate } from "langchain/prompts";
const model = new ChatOpenAI({ model: "gpt-3.5-turbo" });
const template = \`Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Use three sentences maximum and keep the answer as concise as possible.
Always say "thanks for asking!" at the end of the answer.
{context}
Question: {question}
Helpful Answer:\`;
const chain = RetrievalQAChain.fromLLM(model, vectorStore.asRetriever(), {
prompt: PromptTemplate.fromTemplate(template),
});
const response = await chain.call({
query: "What is task decomposition?"
});
console.log(response);
/*
{
text: 'Task decomposition is the process of breaking down a large task into smaller, more manageable subgoals. This allows for efficient handling of complex tasks and aids in planning and organizing the steps needed to achieve the overall goal. Thanks for asking!'
}
*/
\`\`\`
#### 3.2.3 Returning source documents
The full set of retrieved documents used for answer distillation can be returned using \`returnSourceDocuments: true\`.
\`\`\`typescript
import { RetrievalQAChain } from "langchain/chains";
import { ChatOpenAI } from "langchain/chat_models/openai";
const model = new ChatOpenAI({ model: "gpt-3.5-turbo" });
const chain = RetrievalQAChain.fromLLM(model, vectorStore.asRetriever(), {
returnSourceDocuments: true
});
const response = await chain.call({
query: "What is task decomposition?"
});
console.log(response.sourceDocuments[0]);
/*
Document {
pageContent: 'Task decomposition can be done (1) by LLM with simple prompting like "Steps for XYZ.\\n1.", "What are the subgoals for achieving XYZ?", (2) by using task-specific instructions; e.g. "Write a story outline." for writing a novel, or (3) with human inputs.',
metadata: [Object]
}
*/
\`\`\`
#### 3.2.4 Customizing retrieved docs in the LLM prompt
Retrieved documents can be fed to an LLM for answer distillation in a few different ways.
\`stuff\`
| |
190851
|
, \`refine\`, and \`map-reduce\` chains for passing documents to an LLM prompt are well summarized [here](/docs/modules/chains/document/).
\`stuff\` is commonly used because it simply "stuffs" all retrieved documents into the prompt.
The [loadQAChain](/docs/modules/chains/document/) methods are easy ways to pass documents to an LLM using these various approaches.
\`\`\`typescript
import { loadQAStuffChain } from "langchain/chains";
const stuffChain = loadQAStuffChain(model);
const stuffResult = await stuffChain.call({
input_documents: relevantDocs,
question: "What is task decomposition?",
});
console.log(stuffResult);
/*
{
text: 'Task decomposition is the process of breaking down a large task into smaller, more manageable subgoals or steps. This allows for efficient handling of complex tasks by focusing on one subgoal at a time. Task decomposition can be done through various methods such as using simple prompting, task-specific instructions, or human inputs.'
}
*/
\`\`\`
## 4. Chat
### 4.1 Getting started
To keep chat history, we use a variant of the previous chain called a \`ConversationalRetrievalQAChain\`.
First, specify a \`Memory buffer\` to track the conversation inputs / outputs.
\`\`\`typescript
import { ConversationalRetrievalQAChain } from "langchain/chains";
import { BufferMemory } from "langchain/memory";
import { ChatOpenAI } from "langchain/chat_models/openai";
const memory = new BufferMemory({
memoryKey: "chat_history",
returnMessages: true,
});
\`\`\`
Next, we initialize and call the chain:
\`\`\`typescript
const model = new ChatOpenAI({ model: "gpt-3.5-turbo" });
const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStore.asRetriever(), {
memory
});
const result = await chain.call({
question: "What are some of the main ideas in self-reflection?"
});
console.log(result);
/*
{
text: 'Some main ideas in self-reflection include:\n' +
'\n' +
'1. Iterative Improvement: Self-reflection allows autonomous agents to improve by continuously refining past action decisions and correcting mistakes.\n' +
'\n' +
'2. Trial and Error: Self-reflection plays a crucial role in real-world tasks where trial and error are inevitable. It helps agents learn from failed trajectories and make adjustments for future actions.\n' +
'\n' +
'3. Constructive Criticism: Agents engage in constructive self-criticism of their big-picture behavior to identify areas for improvement.\n' +
'\n' +
'4. Decision and Strategy Refinement: Reflection on past decisions and strategies enables agents to refine their approach and make more informed choices.\n' +
'\n' +
'5. Efficiency and Optimization: Self-reflection encourages agents to be smart and efficient in their actions, aiming to complete tasks in the least number of steps.\n' +
'\n' +
'These ideas highlight the importance of self-reflection in enhancing performance and guiding future actions.'
}
*/
\`\`\`
The \`Memory buffer\` has context to resolve \`"it"\` ("self-reflection") in the below question.
\`\`\`typescript
const followupResult = await chain.call({
question: "How does the Reflexion paper handle it?"
});
console.log(followupResult);
/*
{
text: "The Reflexion paper introduces a framework that equips agents with dynamic memory and self-reflection capabilities to improve their reasoning skills. The approach involves showing the agent two-shot examples, where each example consists of a failed trajectory and an ideal reflection on how to guide future changes in the agent's plan. These reflections are then added to the agent's working memory as context for querying a language model. The agent uses this self-reflection information to make decisions on whether to start a new trial or continue with the current plan."
}
*/
\`\`\`
### 4.2 Going deeper
The [documentation](/docs/modules/chains/popular/chat_vector_db) on \`ConversationalRetrievalQAChain\` offers a few extensions, such as streaming and source documents.
# Conversational Retrieval Agents
This is an agent specifically optimized for doing retrieval when necessary while holding a conversation and being able
to answer questions based on previous dialogue in the conversation.
To start, we will set up the retriever we want to use, then turn it into a retriever tool. Next, we will use the high-level constructor for this type of agent.
Finally, we will walk through how to construct a conversational retrieval agent from components.
## The Retriever
To start, we need a retriever to use! The code here is mostly just example code. Feel free to use your own retriever and skip to the next section on creating a retriever tool.
\`\`\`typescript
import { FaissStore } from "langchain/vectorstores/faiss";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
const loader = new TextLoader("state_of_the_union.txt");
const docs = await loader.load();
const splitter = new RecursiveCharacterTextSplitter({
chunkSize: 1000,
chunkOverlap: 0
});
const texts = await splitter.splitDocuments(docs);
const vectorStore = await FaissStore.fromDocuments(texts, new OpenAIEmbeddings());
const retriever = vectorStore.asRetriever();
\`\`\`
## Retriever Tool
Now we need to create a tool for our retriever. The main things we need to pass in are a \`name\` for the retriever as well as a \`description\`. These will both be used by the language model, so they should be informative.
\`\`\`typescript
import { createRetrieverTool } from "langchain/agents/toolkits";
const tool = createRetrieverTool(retriever, {
name: "search_state_of_union",
description: "Searches and returns documents regarding the state-of-the-union.",
});
\`\`\`
## Agent Constructor
Here, we will use the high-level \`createConversationalRetrievalAgent\` API to construct the agent.
Notice that besides the list of tools, the only thing we need to pass in is a language model to use.
Under the hood, this agent uses the OpenAIFunctionsAgent, so we need to use a ChatOpenAI model.
\`\`\`typescript
import { createConversationalRetrievalAgent } from "langchain/agents/toolkits";
import { ChatOpenAI } from "langchain/chat_models/openai";
const model = new ChatOpenAI({
temperature: 0,
});
const executor = await createConversationalRetrievalAgent(model, [tool], {
verbose: true,
});
\`\`\`
We can now try it out!
\`\`\`typescript
const result = await executor.call({
input: "Hi, I'm Bob!"
});
console.log(result);
/*
{
output: 'Hello Bob! How can I assist you today?',
intermediateSteps: []
}
*/
const result2 = await executor.call({
input: "What's my name?"
});
console.log(result2);
/*
{ output: 'Your name is Bob.', intermediateSteps: [] }
*/
const result3 = await executor.call({
input: "What did the president say about Ketanji Brown Jackson in the most recent state of the union?"
});
console.log(result3);
/*
{
output: "In the most recent state of the union, President Biden mentioned Ketanji Brown Jackson. He nominated her as a Circuit Court of Appeals judge and described her as one of the nation's top legal minds who will continue Justice Breyer's legacy of excellence. He mentioned that she has received a broad range of support, including from the Fraternal Order of Police and former judges appointed by Democrats and Republicans.",
intermediateSteps: [
{...}
]
}
*/
const result4 = await executor.call({
input: "How long ago did he nominate her?"
});
console.log(result4);
/*
{
output: 'President Biden nominated Ketanji Brown Jackson four days before the most recent state of the union address.',
intermediateSteps: []
}
*/
\`\`\`
Note that for the final call, the agent used previously retrieved information to answer the query and did not need to call the tool again!
Here's a trace showing how the agent fetches documents to answer the question with the retrieval tool:
https://smith.langchain.com/public/1e2b1887-ca44-4210-913b-a69c1b8a8e7e/r
## Creating from components
What actually is going on underneath the hood? Let's take a look so we can understand how to modify things going forward.
### Memory
In this example, we want the agent to remember not only previous conversations, but also previous intermediate steps.
For that, we can use \`OpenAIAgentTokenBufferMemory\`
| |
190852
|
. Note that if you want to change whether the agent remembers intermediate steps,
how long the retained buffer is, or anything like that, you should change this part.
\`\`\`typescript
import { OpenAIAgentTokenBufferMemory } from "langchain/agents/toolkits";
const memory = new OpenAIAgentTokenBufferMemory({
llm: model,
memoryKey: "chat_history",
outputKey: "output"
});
\`\`\`
You should make sure \`memoryKey\` is set to \`"chat_history"\` and \`outputKey\` is set to \`"output"\` for the OpenAI functions agent.
This memory also has \`returnMessages\` set to \`true\` by default.
You can also load messages from prior conversations into this memory by initializing it with a pre-loaded chat history:
\`\`\`typescript
import { ChatOpenAI } from "langchain/chat_models/openai";
import { OpenAIAgentTokenBufferMemory } from "langchain/agents/toolkits";
import { HumanMessage, AIMessage } from "langchain/schema";
import { ChatMessageHistory } from "langchain/memory";
const previousMessages = [
new HumanMessage("My name is Bob"),
new AIMessage("Nice to meet you, Bob!"),
];
const chatHistory = new ChatMessageHistory(previousMessages);
const memory = new OpenAIAgentTokenBufferMemory({
llm: new ChatOpenAI({}),
memoryKey: "chat_history",
outputKey: "output",
chatHistory,
});
\`\`\`
### Agent executor
We can recreate the agent executor directly with the \`initializeAgentExecutorWithOptions\` method.
This allows us to customize the agent's system message by passing in a \`prefix\` into \`agentArgs\`.
Importantly, we must pass in \`returnIntermediateSteps: true\` since we are recording that with our memory object.
\`\`\`typescript
import { initializeAgentExecutorWithOptions } from "langchain/agents";
const executor = await initializeAgentExecutorWithOptions([tool], model, {
agentType: "openai-functions",
memory,
returnIntermediateSteps: true,
agentArgs: {
prefix:
\`Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.\`,
},
});
\`\`\`
`;
| |
190861
|
# Development Instructions
This project uses the testing, build and release standards specified
by the PyPA organization and documented at
https://packaging.python.org.
## Setup
Set up a virtual environment and install the project's requirements
and dev requirements:
```
python3 -m venv venv # Only need to do this once
source venv/bin/activate # Do this each time you use a new shell for the project
pip install -r requirements.txt
pip install -r requirements_dev.txt
pre-commit install # install the precommit hooks
```
You can also install the `chromadb` package itself locally and in editable mode with `pip install -e .`.
## Running Chroma
Chroma can be run via 3 modes:
1. Standalone and in-memory:
```python
import chromadb
api = chromadb.Client()
print(api.heartbeat())
```
2. Standalone and in-memory with persistence:
This saves your db and your indexes to the directory you specify (`./chroma` by default) and can also load from them.
```python
import chromadb
api = chromadb.PersistentClient(path="/path/to/persist/directory")
print(api.heartbeat())
```
3. With a persistent backend and a small frontend client
Run `chroma run --path /chroma_db_path`
```python
import chromadb
api = chromadb.HttpClient(host="localhost", port="8000")
print(api.heartbeat())
```
## Local dev setup for distributed chroma
We use Tilt to provide the local dev setup. Tilt is an open source project for local Kubernetes development.
##### Requirements
- Docker
- Local Kubernetes cluster (Recommended: [OrbStack](https://orbstack.dev/) for mac, [Kind](https://kind.sigs.k8s.io/) for linux)
- [Tilt](https://docs.tilt.dev/)
For starting the distributed Chroma in the workspace, use `tilt up`. It will create all the required resources and build the necessary Docker image in the current kubectl context.
Once done, it will expose Chroma on port 8000. You can also visit the Tilt dashboard UI at http://localhost:10350/. To clean and remove all the resources created by Tilt, use `tilt down`.
## Testing
Unit tests are in the `/chromadb/test` directory.
To run unit tests using your current environment, run `pytest`.
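For example, a self-contained test against an in-memory client could look roughly like this sketch (the collection name and data here are arbitrary):

```python
import chromadb


def test_add_and_query_roundtrip():
    client = chromadb.EphemeralClient()
    collection = client.create_collection(name="smoke-test")
    collection.add(
        ids=["a", "b"],
        embeddings=[[1.0, 0.0], [0.0, 1.0]],
        documents=["doc a", "doc b"],
    )
    # The stored embedding closest to [0.9, 0.1] is "a".
    results = collection.query(query_embeddings=[[0.9, 0.1]], n_results=1)
    assert results["ids"][0] == ["a"]
```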
## Manual Build
To manually build a distribution, run `python -m build`.
The project's source and wheel distributions will be placed in the `dist` directory.
## Manual Release
Not yet implemented.
## Versioning
This project uses PyPA's `setuptools_scm` module to determine the
version number for build artifacts, meaning the version number is
derived from Git rather than hardcoded in the repository. For full
details, see the
[documentation for setuptools_scm](https://github.com/pypa/setuptools_scm/).
In brief, version numbers are generated as follows:
- If the current git head is tagged, the version number is exactly the
tag (e.g, `0.0.1`).
- If the current git head is a clean checkout, but is not tagged,
the version number is a patch version increment of the most recent
tag, plus `devN` where N is the number of commits since the most
recent tag. For example, if there have been 5 commits since the
`0.0.1` tag, the generated version will be `0.0.2-dev5`.
- If the current head is not a clean checkout, a `+dirty` local
version will be appended to the version number. For example,
`0.0.2-dev5+dirty`.
At any point, you can manually run `python -m setuptools_scm` to see
what version would be assigned given your current state.
## Continuous Integration
This project uses Github Actions to run unit tests automatically upon
every commit to the main branch. See the documentation for Github
Actions and the flow definitions in `.github/workflows` for details.
## Continuous Delivery
Not yet implemented.
| |
190904
|
<p align="center">
<a href="https://trychroma.com"><img src="https://user-images.githubusercontent.com/891664/227103090-6624bf7d-9524-4e05-9d2c-c28d5d451481.png" alt="Chroma logo"></a>
</p>
<p align="center">
<b>Chroma - the open-source embedding database</b>. <br />
This package is the Python HTTP client-only library for Chroma. This client connects to the Chroma Server. If that is not what you are looking for, you might want to check out the <a href="https://github.com/chroma-core/chroma">full library</a>.
</p>
```bash
pip install chromadb-client # python http-client only library
```
To connect to your server and perform operations using the client only library, you can do the following:
```python
import chromadb
# Example setup of the client to connect to your chroma server
client = chromadb.HttpClient(host="localhost", port=8000)
collection = client.create_collection("all-my-documents")
collection.add(
documents=["This is document1", "This is document2"],
metadatas=[{"source": "notion"}, {"source": "google-docs"}], # filter on these!
ids=["doc1", "doc2"], # unique for each doc
embeddings=[[1.2, 2.1, ...], [1.2, 2.1, ...]]
)
results = collection.query(
query_texts=["This is a query document"],
n_results=2,
# where={"metadata_field": "is_equal_to_this"}, # optional filter
# where_document={"$contains":"search_string"} # optional filter
)
```
## License
[Apache 2.0](./LICENSE)
| |
190974
|
## chromadb
Chroma is the open-source embedding database. Chroma makes it easy to build LLM apps by making knowledge, facts, and skills pluggable for LLMs.
This package gives you a JS/TS interface to talk to a backend Chroma DB over REST.
[Learn more about Chroma](https://github.com/chroma-core/chroma)
- [💬 Community Discord](https://discord.gg/MMeYNTmh3x)
- [📖 Documentation](https://docs.trychroma.com/)
- [💡 Colab Example](https://colab.research.google.com/drive/1QEzFyqnoFxq7LUGyP1vzR4iLt9PpCDXv?usp=sharing)
- [🏠 Homepage](https://www.trychroma.com/)
## Getting started
Chroma needs to be running in order for this client to talk to it. Please see the [🧪 Usage Guide](https://docs.trychroma.com/guides) to learn how to quickly stand this up.
## Small example
```js
import { ChromaClient } from "chromadb";
const chroma = new ChromaClient({ path: "http://localhost:8000" });
const collection = await chroma.createCollection({ name: "test-from-js" });
for (let i = 0; i < 20; i++) {
await collection.add({
ids: ["test-id-" + i.toString()],
embeddings: [1, 2, 3, 4, 5],
documents: ["test"],
});
}
const queryData = await collection.query({
queryEmbeddings: [1, 2, 3, 4, 5],
queryTexts: ["test"],
});
```
## Local development
[View the Development Readme](./DEVELOP.md)
## License
Apache 2.0
| |
190990
|
import {
afterAll,
beforeAll,
beforeEach,
describe,
expect,
test,
} from "@jest/globals";
import { DOCUMENTS, EMBEDDINGS, IDS, METADATAS } from "./data";
import { ChromaValueError, InvalidCollectionError } from "../src/Errors";
import { DefaultEmbeddingFunction } from "../src/embeddings/DefaultEmbeddingFunction";
import { StartedTestContainer } from "testcontainers";
import { ChromaClient } from "../src/ChromaClient";
import { startChromaContainer } from "./startChromaContainer";
describe("get collections", () => {
// connects to the unauthenticated chroma instance started in
// the global jest setup file.
const client = new ChromaClient({
path: process.env.DEFAULT_CHROMA_INSTANCE_URL,
});
beforeEach(async () => {
await client.reset();
// the sleep ensures the db is fully reset
// this should be further investigated
await new Promise((r) => setTimeout(r, 1000));
});
test("it should get documents from a collection", async () => {
const collection = await client.createCollection({ name: "test" });
await collection.add({
ids: IDS,
embeddings: EMBEDDINGS,
metadatas: METADATAS,
});
const results = await collection.get({ ids: ["test1"] });
expect(results?.ids).toHaveLength(1);
expect(["test1"]).toEqual(expect.arrayContaining(results.ids));
expect(["test2"]).not.toEqual(expect.arrayContaining(results.ids));
expect(results.included).toEqual(
expect.arrayContaining(["metadatas", "documents"]),
);
const results2 = await collection.get({
where: { test: "test1" },
});
expect(results2?.ids).toHaveLength(1);
expect(["test1"]).toEqual(expect.arrayContaining(results2.ids));
});
test("wrong code returns an error", async () => {
const collection = await client.createCollection({ name: "test" });
await collection.add({
ids: IDS,
embeddings: EMBEDDINGS,
metadatas: METADATAS,
});
try {
await collection.get({
where: {
//@ts-ignore supposed to fail
test: { $contains: "hello" },
},
});
} catch (error: any) {
expect(error).toBeDefined();
expect(error).toBeInstanceOf(ChromaValueError);
expect(error.message).toMatchInlineSnapshot(
`"Expected where operator to be one of $gt, $gte, $lt, $lte, $ne, $eq, $in, $nin, got $contains"`,
);
}
});
test("it should get embedding with matching documents", async () => {
const collection = await client.createCollection({ name: "test" });
await collection.add({
ids: IDS,
embeddings: EMBEDDINGS,
metadatas: METADATAS,
documents: DOCUMENTS,
});
const results2 = await collection.get({
whereDocument: { $contains: "This is a test" },
});
expect(results2?.ids).toHaveLength(1);
expect(["test1"]).toEqual(expect.arrayContaining(results2.ids));
});
test("it should get records not matching", async () => {
const collection = await client.createCollection({ name: "test" });
await collection.add({
ids: IDS,
embeddings: EMBEDDINGS,
metadatas: METADATAS,
documents: DOCUMENTS,
});
const results2 = await collection.get({
whereDocument: { $not_contains: "This is another" },
});
expect(results2?.ids).toHaveLength(2);
expect(["test1", "test3"]).toEqual(expect.arrayContaining(results2.ids));
});
test("test gt, lt, in a simple small way", async () => {
const collection = await client.createCollection({ name: "test" });
await collection.add({
ids: IDS,
embeddings: EMBEDDINGS,
metadatas: METADATAS,
});
const items = await collection.get({
where: { float_value: { $gt: -1.4 } },
});
expect(items.ids).toHaveLength(2);
expect(["test2", "test3"]).toEqual(expect.arrayContaining(items.ids));
});
test("should error on non existing collection", async () => {
const collection = await client.createCollection({ name: "test" });
await client.deleteCollection({ name: "test" });
expect(async () => {
await collection.get({ ids: IDS });
}).rejects.toThrow(InvalidCollectionError);
});
test("it should throw an error if the collection does not exist", async () => {
await expect(
async () =>
await client.getCollection({
name: "test",
embeddingFunction: new DefaultEmbeddingFunction(),
}),
).rejects.toThrow(Error);
});
});
| |
191321
|
from typing import Dict, Optional
import logging
from chromadb.api.client import Client as ClientCreator
from chromadb.api.client import AdminClient as AdminClientCreator
from chromadb.api.async_client import AsyncClient as AsyncClientCreator
from chromadb.auth.token_authn import TokenTransportHeader
import chromadb.config
from chromadb.config import DEFAULT_DATABASE, DEFAULT_TENANT, Settings
from chromadb.api import AdminAPI, AsyncClientAPI, ClientAPI
from chromadb.api.models.Collection import Collection
from chromadb.api.types import (
CollectionMetadata,
Documents,
EmbeddingFunction,
Embeddings,
IDs,
Include,
Metadata,
Where,
QueryResult,
GetResult,
WhereDocument,
UpdateCollectionMetadata,
)
# Re-export types from chromadb.types
__all__ = [
"Collection",
"Metadata",
"Where",
"WhereDocument",
"Documents",
"IDs",
"Embeddings",
"EmbeddingFunction",
"Include",
"CollectionMetadata",
"UpdateCollectionMetadata",
"QueryResult",
"GetResult",
"TokenTransportHeader",
]
logger = logging.getLogger(__name__)
__settings = Settings()
__version__ = "0.5.15"
# Workaround to deal with Colab's old sqlite3 version
def is_in_colab() -> bool:
try:
import google.colab # noqa: F401
return True
except ImportError:
return False
IN_COLAB = is_in_colab()
is_client = False
try:
from chromadb.is_thin_client import is_thin_client
is_client = is_thin_client
except ImportError:
is_client = False
if not is_client:
import sqlite3
if sqlite3.sqlite_version_info < (3, 35, 0):
if IN_COLAB:
# In Colab, hotswap to pysqlite-binary if it's too old
import subprocess
import sys
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "pysqlite3-binary"]
)
__import__("pysqlite3")
sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")
else:
raise RuntimeError(
"\033[91mYour system has an unsupported version of sqlite3. Chroma \
requires sqlite3 >= 3.35.0.\033[0m\n"
"\033[94mPlease visit \
https://docs.trychroma.com/troubleshooting#sqlite to learn how \
to upgrade.\033[0m"
)
def configure(**kwargs) -> None: # type: ignore
"""Override Chroma's default settings, environment variables or .env files"""
global __settings
__settings = chromadb.config.Settings(**kwargs)
def get_settings() -> Settings:
return __settings
def EphemeralClient(
settings: Optional[Settings] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> ClientAPI:
"""
Creates an in-memory instance of Chroma. This is useful for testing and
development, but not recommended for production use.
Args:
tenant: The tenant to use for this client. Defaults to the default tenant.
database: The database to use for this client. Defaults to the default database.
"""
if settings is None:
settings = Settings()
settings.is_persistent = False
# Make sure parameters are the correct types -- users can pass anything.
tenant = str(tenant)
database = str(database)
return ClientCreator(settings=settings, tenant=tenant, database=database)
def PersistentClient(
path: str = "./chroma",
settings: Optional[Settings] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> ClientAPI:
"""
Creates a persistent instance of Chroma that saves to disk. This is useful for
testing and development, but not recommended for production use.
Args:
path: The directory to save Chroma's data to. Defaults to "./chroma".
tenant: The tenant to use for this client. Defaults to the default tenant.
database: The database to use for this client. Defaults to the default database.
"""
if settings is None:
settings = Settings()
settings.persist_directory = path
settings.is_persistent = True
    # Make sure parameters are the correct types -- users can pass anything.
tenant = str(tenant)
database = str(database)
return ClientCreator(tenant=tenant, database=database, settings=settings)
def HttpClient(
host: str = "localhost",
port: int = 8000,
ssl: bool = False,
headers: Optional[Dict[str, str]] = None,
settings: Optional[Settings] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> ClientAPI:
"""
Creates a client that connects to a remote Chroma server. This supports
many clients connecting to the same server, and is the recommended way to
use Chroma in production.
Args:
host: The hostname of the Chroma server. Defaults to "localhost".
        port: The port of the Chroma server. Defaults to 8000.
ssl: Whether to use SSL to connect to the Chroma server. Defaults to False.
headers: A dictionary of headers to send to the Chroma server. Defaults to {}.
settings: A dictionary of settings to communicate with the chroma server.
tenant: The tenant to use for this client. Defaults to the default tenant.
database: The database to use for this client. Defaults to the default database.
"""
if settings is None:
settings = Settings()
# Make sure parameters are the correct types -- users can pass anything.
host = str(host)
port = int(port)
ssl = bool(ssl)
tenant = str(tenant)
database = str(database)
settings.chroma_api_impl = "chromadb.api.fastapi.FastAPI"
if settings.chroma_server_host and settings.chroma_server_host != host:
raise ValueError(
f"Chroma server host provided in settings[{settings.chroma_server_host}] is different to the one provided in HttpClient: [{host}]"
)
settings.chroma_server_host = host
if settings.chroma_server_http_port and settings.chroma_server_http_port != port:
raise ValueError(
f"Chroma server http port provided in settings[{settings.chroma_server_http_port}] is different to the one provided in HttpClient: [{port}]"
)
settings.chroma_server_http_port = port
settings.chroma_server_ssl_enabled = ssl
settings.chroma_server_headers = headers
return ClientCreator(tenant=tenant, database=database, settings=settings)
async def AsyncHttpClient(
host: str = "localhost",
port: int = 8000,
ssl: bool = False,
headers: Optional[Dict[str, str]] = None,
settings: Optional[Settings] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> AsyncClientAPI:
"""
Creates an async client that connects to a remote Chroma server. This supports
many clients connecting to the same server, and is the recommended way to
use Chroma in production.
Args:
host: The hostname of the Chroma server. Defaults to "localhost".
        port: The port of the Chroma server. Defaults to 8000.
ssl: Whether to use SSL to connect to the Chroma server. Defaults to False.
headers: A dictionary of headers to send to the Chroma server. Defaults to {}.
settings: A dictionary of settings to communicate with the chroma server.
tenant: The tenant to use for this client. Defaults to the default tenant.
database: The database to use for this client. Defaults to the default database.
"""
if settings is None:
settings = Settings()
# Make sure parameters are the correct types -- users can pass anything.
host = str(host)
port = int(port)
ssl = bool(ssl)
tenant = str(tenant)
database = str(database)
settings.chroma_api_impl = "chromadb.api.async_fastapi.AsyncFastAPI"
if settings.chroma_server_host and settings.chroma_server_host != host:
raise ValueError(
f"Chroma server host provided in settings[{settings.chroma_server_host}] is different to the one provided in HttpClient: [{host}]"
)
settings.chroma_server_host = host
if settings.chroma_server_http_port and settings.chroma_server_http_port != port:
raise ValueError(
f"Chroma server http port provided in settings[{settings.chroma_server_http_port}] is different to the one provided in HttpClient: [{port}]"
)
settings.chroma_server_http_port = port
settings.chroma_server_ssl_enabled = ssl
settings.chroma_server_headers = headers
return await AsyncClientCreator.create(
tenant=tenant, database=database, settings=settings
)
| |
191325
|
from abc import abstractmethod
from typing import Dict, Optional, Type
from overrides import overrides, EnforceOverrides
class ChromaError(Exception, EnforceOverrides):
trace_id: Optional[str] = None
def code(self) -> int:
"""Return an appropriate HTTP response code for this error"""
return 400 # Bad Request
def message(self) -> str:
return ", ".join(self.args)
@classmethod
@abstractmethod
def name(cls) -> str:
"""Return the error name"""
pass
class InvalidDimensionException(ChromaError):
@classmethod
@overrides
def name(cls) -> str:
return "InvalidDimension"
class InvalidCollectionException(ChromaError):
@classmethod
@overrides
def name(cls) -> str:
return "InvalidCollection"
class IDAlreadyExistsError(ChromaError):
@overrides
def code(self) -> int:
return 409 # Conflict
@classmethod
@overrides
def name(cls) -> str:
return "IDAlreadyExists"
class ChromaAuthError(ChromaError):
@overrides
def code(self) -> int:
return 403
@classmethod
@overrides
def name(cls) -> str:
return "AuthError"
@overrides
def message(self) -> str:
return "Forbidden"
class DuplicateIDError(ChromaError):
@classmethod
@overrides
def name(cls) -> str:
return "DuplicateID"
class InvalidArgumentError(ChromaError):
@overrides
def code(self) -> int:
return 400
@classmethod
@overrides
def name(cls) -> str:
return "InvalidArgument"
class InvalidUUIDError(ChromaError):
@classmethod
@overrides
def name(cls) -> str:
return "InvalidUUID"
class InvalidHTTPVersion(ChromaError):
@classmethod
@overrides
def name(cls) -> str:
return "InvalidHTTPVersion"
class AuthorizationError(ChromaError):
@overrides
def code(self) -> int:
return 401
@classmethod
@overrides
def name(cls) -> str:
return "AuthorizationError"
class NotFoundError(ChromaError):
@overrides
def code(self) -> int:
return 404
@classmethod
@overrides
def name(cls) -> str:
return "NotFoundError"
class UniqueConstraintError(ChromaError):
@overrides
def code(self) -> int:
return 409
@classmethod
@overrides
def name(cls) -> str:
return "UniqueConstraintError"
class BatchSizeExceededError(ChromaError):
@overrides
def code(self) -> int:
return 413
@classmethod
@overrides
def name(cls) -> str:
return "BatchSizeExceededError"
class VersionMismatchError(ChromaError):
@overrides
def code(self) -> int:
return 500
@classmethod
@overrides
def name(cls) -> str:
return "VersionMismatchError"
class RateLimitError(ChromaError):
@overrides
def code(self) -> int:
return 429
@classmethod
@overrides
def name(cls) -> str:
return "RateLimitError"
error_types: Dict[str, Type[ChromaError]] = {
"InvalidDimension": InvalidDimensionException,
"InvalidCollection": InvalidCollectionException,
"IDAlreadyExists": IDAlreadyExistsError,
"DuplicateID": DuplicateIDError,
"InvalidUUID": InvalidUUIDError,
"InvalidHTTPVersion": InvalidHTTPVersion,
"AuthorizationError": AuthorizationError,
"NotFoundError": NotFoundError,
"BatchSizeExceededError": BatchSizeExceededError,
"VersionMismatchError": VersionMismatchError,
"RateLimitError": RateLimitError,
"AuthError": ChromaAuthError,
}
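# A minimal usage sketch: the helper below is illustrative only (not part of this module's
# API) and shows how `error_types` can map a server-reported error name back onto the
# matching exception class.
def _raise_chroma_error_sketch(name: str, message: str) -> None:
    exc_class = error_types.get(name, ChromaError)
    raise exc_class(message)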
| |
191408
|
from typing import Any, Dict, List, Optional, cast
from hypothesis import given, settings, HealthCheck
import pytest
from chromadb.api import ClientAPI
from chromadb.test.property import invariants
from chromadb.api.types import (
Document,
Embedding,
Embeddings,
GetResult,
IDs,
Metadata,
Metadatas,
Where,
WhereDocument,
)
from chromadb.test.conftest import reset, NOT_CLUSTER_ONLY
import chromadb.test.property.strategies as strategies
import hypothesis.strategies as st
import logging
import random
import re
from chromadb.test.utils.wait_for_version_increase import wait_for_version_increase
import numpy as np
def _filter_where_clause(clause: Where, metadata: Optional[Metadata]) -> bool:
"""Return true if the where clause is true for the given metadata map"""
metadata = metadata or dict()
key, expr = list(clause.items())[0]
# Handle the shorthand for equal: {key: val} where val is a simple value
if (
isinstance(expr, str)
or isinstance(expr, bool)
or isinstance(expr, int)
or isinstance(expr, float)
):
return _filter_where_clause({key: {"$eq": expr}}, metadata) # type: ignore[dict-item]
# expr is a list of clauses
if key == "$and":
assert isinstance(expr, list)
return all(_filter_where_clause(clause, metadata) for clause in expr)
if key == "$or":
assert isinstance(expr, list)
return any(_filter_where_clause(clause, metadata) for clause in expr)
# expr is an operator expression
assert isinstance(expr, dict)
op, val = list(expr.items())[0]
assert isinstance(metadata, dict)
if op == "$eq":
return key in metadata and metadata[key] == val
elif op == "$ne":
return key not in metadata or metadata[key] != val
elif op == "$in":
return key in metadata and metadata[key] in val # type: ignore[operator]
elif op == "$nin":
return key not in metadata or metadata[key] not in val # type: ignore[operator]
# The following conditions only make sense for numeric values
assert (
key not in metadata
or isinstance(metadata[key], int)
or isinstance(metadata[key], float)
)
assert isinstance(val, int) or isinstance(val, float)
if op == "$gt":
return key in metadata and metadata[key] > val
elif op == "$gte":
return key in metadata and metadata[key] >= val
elif op == "$lt":
return key in metadata and metadata[key] < val
elif op == "$lte":
return key in metadata and metadata[key] <= val
else:
raise ValueError("Unknown operator: {}".format(key))
def _filter_where_doc_clause(clause: WhereDocument, doc: Document) -> bool:
key, expr = list(clause.items())[0]
if key == "$and":
assert isinstance(expr, list)
return all(_filter_where_doc_clause(clause, doc) for clause in expr)
if key == "$or":
assert isinstance(expr, list)
return any(_filter_where_doc_clause(clause, doc) for clause in expr)
# Simple $contains clause
assert isinstance(expr, str)
if key == "$contains":
if not doc:
return False
# SQLite FTS handles % and _ as word boundaries that are ignored so we need to
# treat them as wildcards
if "%" in expr or "_" in expr:
expr = expr.replace("%", ".").replace("_", ".")
return re.search(expr, doc) is not None
return expr in doc
elif key == "$not_contains":
if not doc:
return True
# SQLite FTS handles % and _ as word boundaries that are ignored so we need to
# treat them as wildcards
if "%" in expr or "_" in expr:
expr = expr.replace("%", ".").replace("_", ".")
return re.search(expr, doc) is None
return expr not in doc
else:
raise ValueError("Unknown operator: {}".format(key))
EMPTY_DICT: Dict[Any, Any] = {}
EMPTY_STRING: str = ""
def _filter_embedding_set(
record_set: strategies.RecordSet, filter: strategies.Filter
) -> IDs:
"""Return IDs from the embedding set that match the given filter object"""
normalized_record_set = invariants.wrap_all(record_set)
ids = set(normalized_record_set["ids"])
filter_ids = filter["ids"]
if filter_ids is not None:
filter_ids = invariants.wrap(filter_ids)
assert filter_ids is not None
# If the filter ids is an empty list then we treat that as get all
if len(filter_ids) != 0:
ids = ids.intersection(filter_ids)
for i in range(len(normalized_record_set["ids"])):
if filter["where"]:
metadatas: Metadatas
if isinstance(normalized_record_set["metadatas"], list):
metadatas = normalized_record_set["metadatas"] # type: ignore[assignment]
else:
metadatas = [EMPTY_DICT] * len(normalized_record_set["ids"])
filter_where: Where = filter["where"]
if not _filter_where_clause(filter_where, metadatas[i]):
ids.discard(normalized_record_set["ids"][i])
if filter["where_document"]:
documents = normalized_record_set["documents"] or [EMPTY_STRING] * len(
normalized_record_set["ids"]
)
if not _filter_where_doc_clause(filter["where_document"], documents[i]):
ids.discard(normalized_record_set["ids"][i])
return list(ids)
collection_st = st.shared(
strategies.collections(add_filterable_data=True, with_hnsw_params=True),
key="coll",
)
recordset_st = st.shared(
strategies.recordsets(collection_st, max_size=1000), key="recordset"
)
@settings(
deadline=90000,
suppress_health_check=[
HealthCheck.function_scoped_fixture,
HealthCheck.large_base_example,
HealthCheck.filter_too_much,
],
) # type: ignore
@given(
collection=collection_st,
record_set=recordset_st,
filters=st.lists(strategies.filters(collection_st, recordset_st), min_size=1),
should_compact=st.booleans(),
)
def test_filterable_metadata_get(
caplog,
client: ClientAPI,
collection: strategies.Collection,
record_set,
filters,
should_compact: bool,
) -> None:
caplog.set_level(logging.ERROR)
reset(client)
coll = client.create_collection(
name=collection.name,
metadata=collection.metadata, # type: ignore
embedding_function=collection.embedding_function,
)
initial_version = coll.get_model()["version"]
coll.add(**record_set)
if not NOT_CLUSTER_ONLY:
# Only wait for compaction if the size of the collection is
# some minimal size
if should_compact and len(invariants.wrap(record_set["ids"])) > 10:
# Wait for the model to be updated
wait_for_version_increase(client, collection.name, initial_version) # type: ignore
for filter in filters:
result_ids = coll.get(**filter)["ids"]
expected_ids = _filter_embedding_set(record_set, filter)
assert sorted(result_ids) == sorted(expected_ids)
| |
191516
|
import logging
from typing import Mapping, Optional, cast
from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
logger = logging.getLogger(__name__)
class OpenAIEmbeddingFunction(EmbeddingFunction[Documents]):
def __init__(
self,
api_key: Optional[str] = None,
model_name: str = "text-embedding-ada-002",
organization_id: Optional[str] = None,
api_base: Optional[str] = None,
api_type: Optional[str] = None,
api_version: Optional[str] = None,
deployment_id: Optional[str] = None,
default_headers: Optional[Mapping[str, str]] = None,
dimensions: Optional[int] = None,
):
"""
Initialize the OpenAIEmbeddingFunction.
Args:
            api_key (str, optional): Your API key for the OpenAI API. If not
                provided and no key is already set on the `openai` module, an error is raised.
organization_id(str, optional): The OpenAI organization ID if applicable
model_name (str, optional): The name of the model to use for text
embeddings. Defaults to "text-embedding-ada-002".
api_base (str, optional): The base path for the API. If not provided,
it will use the base path for the OpenAI API. This can be used to
point to a different deployment, such as an Azure deployment.
api_type (str, optional): The type of the API deployment. This can be
used to specify a different deployment, such as 'azure'. If not
provided, it will use the default OpenAI deployment.
api_version (str, optional): The api version for the API. If not provided,
it will use the api version for the OpenAI API. This can be used to
point to a different deployment, such as an Azure deployment.
deployment_id (str, optional): Deployment ID for Azure OpenAI.
default_headers (Mapping, optional): A mapping of default headers to be sent with each API request.
dimensions (int, optional): The number of dimensions for the embeddings.
Only supported for `text-embedding-3` or later models from OpenAI.
https://platform.openai.com/docs/api-reference/embeddings/create#embeddings-create-dimensions
"""
try:
import openai
except ImportError:
raise ValueError(
"The openai python package is not installed. Please install it with `pip install openai`"
)
if api_key is not None:
openai.api_key = api_key
# If the api key is still not set, raise an error
elif openai.api_key is None:
raise ValueError(
"Please provide an OpenAI API key. You can get one at https://platform.openai.com/account/api-keys"
)
if api_base is not None:
openai.api_base = api_base
if api_version is not None:
openai.api_version = api_version
self._api_type = api_type
if api_type is not None:
openai.api_type = api_type
if organization_id is not None:
openai.organization = organization_id
self._v1 = openai.__version__.startswith("1.")
if self._v1:
if api_type == "azure":
self._client = openai.AzureOpenAI(
api_key=api_key,
api_version=api_version,
azure_endpoint=api_base,
default_headers=default_headers,
).embeddings
else:
self._client = openai.OpenAI(
api_key=api_key, base_url=api_base, default_headers=default_headers
).embeddings
else:
self._client = openai.Embedding
self._model_name = model_name
self._deployment_id = deployment_id
self._dimensions = dimensions or openai.NOT_GIVEN
def __call__(self, input: Documents) -> Embeddings:
"""
Generate the embeddings for the given `input`.
        # About ignoring types
        Because the `openai` library is not a required dependency, `mypy` cannot infer the
        return types of `self._client.create()`, which makes sorting the resulting list a
        type error. If the `openai` library ever becomes a required dependency, the type
        ignore tags can be removed.
Args:
input (Documents): A list of texts to get embeddings for.
Returns:
Embeddings: The embeddings for the given input sorted by index
"""
# replace newlines, which can negatively affect performance.
input = [t.replace("\n", " ") for t in input]
# Call the OpenAI Embedding API
if self._v1:
embeddings = self._client.create(
input=input,
model=self._deployment_id or self._model_name,
dimensions=self._dimensions,
).data
# Sort resulting embeddings by index
sorted_embeddings = sorted(
embeddings, key=lambda e: e.index # type: ignore
)
# Return just the embeddings
return cast(Embeddings, [result.embedding for result in sorted_embeddings])
else:
if self._api_type == "azure":
embeddings = self._client.create(
input=input, engine=self._deployment_id or self._model_name
)["data"]
else:
embeddings = self._client.create(input=input, model=self._model_name)[
"data"
]
# Sort resulting embeddings by index
sorted_embeddings = sorted(
embeddings, key=lambda e: e["index"] # type: ignore
)
# Return just the embeddings
return cast(
Embeddings, [result["embedding"] for result in sorted_embeddings]
)
| |
191518
|
import logging
from typing import Any, Dict, cast
from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
logger = logging.getLogger(__name__)
class SentenceTransformerEmbeddingFunction(EmbeddingFunction[Documents]):
# Since we do dynamic imports we have to type this as Any
models: Dict[str, Any] = {}
# If you have a beefier machine, try "gtr-t5-large".
# for a full list of options: https://huggingface.co/sentence-transformers, https://www.sbert.net/docs/pretrained_models.html
def __init__(
self,
model_name: str = "all-MiniLM-L6-v2",
device: str = "cpu",
normalize_embeddings: bool = False,
**kwargs: Any,
):
"""Initialize SentenceTransformerEmbeddingFunction.
Args:
model_name (str, optional): Identifier of the SentenceTransformer model, defaults to "all-MiniLM-L6-v2"
device (str, optional): Device used for computation, defaults to "cpu"
normalize_embeddings (bool, optional): Whether to normalize returned vectors, defaults to False
**kwargs: Additional arguments to pass to the SentenceTransformer model.
"""
if model_name not in self.models:
try:
from sentence_transformers import SentenceTransformer
except ImportError:
raise ValueError(
"The sentence_transformers python package is not installed. Please install it with `pip install sentence_transformers`"
)
self.models[model_name] = SentenceTransformer(
model_name, device=device, **kwargs
)
self._model = self.models[model_name]
self._normalize_embeddings = normalize_embeddings
def __call__(self, input: Documents) -> Embeddings:
return cast(
Embeddings,
[
embedding
for embedding in self._model.encode(
list(input),
convert_to_numpy=True,
normalize_embeddings=self._normalize_embeddings,
)
],
)
| |
191566
|
from typing import Optional, Union, TypeVar, List, Dict, Any, Tuple, cast
from numpy.typing import NDArray
import numpy as np
from typing_extensions import TypedDict, Protocol, runtime_checkable
from enum import Enum
from pydantic import Field
import chromadb.errors as errors
from chromadb.types import (
Metadata,
UpdateMetadata,
Vector,
PyVector,
LiteralValue,
LogicalOperator,
WhereOperator,
OperatorExpression,
Where,
WhereDocumentOperator,
WhereDocument,
)
from inspect import signature
from tenacity import retry
# Re-export types from chromadb.types
__all__ = ["Metadata", "Where", "WhereDocument", "UpdateCollectionMetadata"]
META_KEY_CHROMA_DOCUMENT = "chroma:document"
T = TypeVar("T")
OneOrMany = Union[T, List[T]]
# URIs
URI = str
URIs = List[URI]
def maybe_cast_one_to_many_uri(target: OneOrMany[URI]) -> URIs:
if isinstance(target, str):
# One URI
return cast(URIs, [target])
# Already a sequence
return cast(URIs, target)
# IDs
ID = str
IDs = List[ID]
def maybe_cast_one_to_many_ids(target: OneOrMany[ID]) -> IDs:
if isinstance(target, str):
# One ID
return cast(IDs, [target])
# Already a sequence
return cast(IDs, target)
# Embeddings
PyEmbedding = PyVector
PyEmbeddings = List[PyEmbedding]
Embedding = Vector
Embeddings = List[Embedding]
def maybe_cast_one_to_many_embedding(
target: Union[OneOrMany[Embedding], OneOrMany[PyEmbedding]]
) -> Embeddings:
if isinstance(target, List):
# One Embedding
if isinstance(target[0], (int, float)):
return cast(Embeddings, [target])
elif isinstance(target, np.ndarray):
if isinstance(target[0], (np.floating, np.integer)):
return cast(Embeddings, [target])
# Already a sequence
return cast(Embeddings, target)
# Metadatas
Metadatas = List[Metadata]
def maybe_cast_one_to_many_metadata(target: OneOrMany[Metadata]) -> Metadatas:
# One Metadata dict
if isinstance(target, dict):
return cast(Metadatas, [target])
# Already a sequence
return cast(Metadatas, target)
CollectionMetadata = Dict[str, Any]
UpdateCollectionMetadata = UpdateMetadata
# Documents
Document = str
Documents = List[Document]
def is_document(target: Any) -> bool:
if not isinstance(target, str):
return False
return True
def maybe_cast_one_to_many_document(target: OneOrMany[Document]) -> Documents:
# One Document
if is_document(target):
return cast(Documents, [target])
# Already a sequence
return cast(Documents, target)
# Images
ImageDType = Union[np.uint, np.int64, np.float64]
Image = NDArray[ImageDType]
Images = List[Image]
def is_image(target: Any) -> bool:
if not isinstance(target, np.ndarray):
return False
if len(target.shape) < 2:
return False
return True
def maybe_cast_one_to_many_image(target: OneOrMany[Image]) -> Images:
if is_image(target):
return cast(Images, [target])
# Already a sequence
return cast(Images, target)
Parameter = TypeVar("Parameter", Document, Image, Embedding, Metadata, ID)
class IncludeEnum(str, Enum):
documents = "documents"
embeddings = "embeddings"
metadatas = "metadatas"
distances = "distances"
uris = "uris"
data = "data"
# This should just be List[Literal["documents", "embeddings", "metadatas", "distances"]]
# However, this causes an incompatibility with the Overrides library and Python 3.7
Include = List[IncludeEnum]
IncludeMetadataDocuments = Field(default=["metadatas", "documents"])
IncludeMetadataDocumentsEmbeddings = Field(
default=["metadatas", "documents", "embeddings"]
)
IncludeMetadataDocumentsEmbeddingsDistances = Field(
default=["metadatas", "documents", "embeddings", "distances"]
)
IncludeMetadataDocumentsDistances = Field(
default=["metadatas", "documents", "distances"]
)
# Re-export types from chromadb.types
LiteralValue = LiteralValue
LogicalOperator = LogicalOperator
WhereOperator = WhereOperator
OperatorExpression = OperatorExpression
Where = Where
WhereDocumentOperator = WhereDocumentOperator
Embeddable = Union[Documents, Images]
D = TypeVar("D", bound=Embeddable, contravariant=True)
Loadable = List[Optional[Image]]
L = TypeVar("L", covariant=True, bound=Loadable)
class GetResult(TypedDict):
ids: List[ID]
embeddings: Optional[
Union[Embeddings, PyEmbeddings, NDArray[Union[np.int32, np.float32]]]
]
documents: Optional[List[Document]]
uris: Optional[URIs]
data: Optional[Loadable]
metadatas: Optional[List[Metadata]]
included: Include
class QueryResult(TypedDict):
ids: List[IDs]
embeddings: Optional[
Union[
List[Embeddings],
List[PyEmbeddings],
List[NDArray[Union[np.int32, np.float32]]],
]
]
documents: Optional[List[List[Document]]]
uris: Optional[List[List[URI]]]
data: Optional[List[Loadable]]
metadatas: Optional[List[List[Metadata]]]
distances: Optional[List[List[float]]]
included: Include
class IndexMetadata(TypedDict):
dimensionality: int
# The current number of elements in the index (total = additions - deletes)
curr_elements: int
# The auto-incrementing ID of the last inserted element, never decreases so
# can be used as a count of total historical size. Should increase by 1 every add.
# Assume cannot overflow
total_elements_added: int
time_created: float
@runtime_checkable
class EmbeddingFunction(Protocol[D]):
def __call__(self, input: D) -> Embeddings:
...
def __init_subclass__(cls) -> None:
super().__init_subclass__()
# Raise an exception if __call__ is not defined since it is expected to be defined
call = getattr(cls, "__call__")
def __call__(self: EmbeddingFunction[D], input: D) -> Embeddings:
result = call(self, input)
return validate_embeddings(
normalize_embeddings(maybe_cast_one_to_many_embedding(result))
)
setattr(cls, "__call__", __call__)
def embed_with_retries(
self, input: D, **retry_kwargs: Dict[str, Any]
) -> Embeddings:
return cast(Embeddings, retry(**retry_kwargs)(self.__call__)(input))
def normalize_embeddings(
embeddings: Union[
OneOrMany[Embedding],
OneOrMany[PyEmbedding],
]
) -> Embeddings:
return cast(Embeddings, [np.array(embedding) for embedding in embeddings])
def validate_embedding_function(
embedding_function: EmbeddingFunction[Embeddable],
) -> None:
function_signature = signature(
embedding_function.__class__.__call__
).parameters.keys()
protocol_signature = signature(EmbeddingFunction.__call__).parameters.keys()
if not function_signature == protocol_signature:
raise ValueError(
f"Expected EmbeddingFunction.__call__ to have the following signature: {protocol_signature}, got {function_signature}\n"
"Please see https://docs.trychroma.com/guides/embeddings for details of the EmbeddingFunction interface.\n"
"Please note the recent change to the EmbeddingFunction interface: https://docs.trychroma.com/deployment/migration#migration-to-0.4.16---november-7,-2023 \n"
)
class DataLoader(Protocol[L]):
def __call__(self, uris: URIs) -> L:
...
| |
191587
|
class AsyncCollection(CollectionCommon["AsyncServerAPI"]):
async def add(
self,
ids: OneOrMany[ID],
embeddings: Optional[
Union[
OneOrMany[Embedding],
OneOrMany[PyEmbedding],
]
] = None,
metadatas: Optional[OneOrMany[Metadata]] = None,
documents: Optional[OneOrMany[Document]] = None,
images: Optional[OneOrMany[Image]] = None,
uris: Optional[OneOrMany[URI]] = None,
) -> None:
"""Add embeddings to the data store.
Args:
ids: The ids of the embeddings you wish to add
embeddings: The embeddings to add. If None, embeddings will be computed based on the documents or images using the embedding_function set for the Collection. Optional.
metadatas: The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional.
documents: The documents to associate with the embeddings. Optional.
images: The images to associate with the embeddings. Optional.
uris: The uris of the images to associate with the embeddings. Optional.
Returns:
None
Raises:
ValueError: If you don't provide either embeddings or documents
ValueError: If the length of ids, embeddings, metadatas, or documents don't match
ValueError: If you don't provide an embedding function and don't provide embeddings
ValueError: If you provide both embeddings and documents
ValueError: If you provide an id that already exists
"""
(
ids,
embeddings,
metadatas,
documents,
uris,
) = self._validate_and_prepare_embedding_set(
ids, embeddings, metadatas, documents, images, uris
)
await self._client._add(ids, self.id, embeddings, metadatas, documents, uris)
async def count(self) -> int:
"""The total number of embeddings added to the database
Returns:
int: The total number of embeddings added to the database
"""
return await self._client._count(collection_id=self.id)
async def get(
self,
ids: Optional[OneOrMany[ID]] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Include = ["metadatas", "documents"],
) -> GetResult:
"""Get embeddings and their associate data from the data store. If no ids or where filter is provided returns
all embeddings up to limit starting at offset.
Args:
ids: The ids of the embeddings to get. Optional.
where: A Where type dict used to filter results by. E.g. `{"$and": ["color" : "red", "price": {"$gte": 4.20}]}`. Optional.
limit: The number of documents to return. Optional.
offset: The offset to start returning results from. Useful for paging results with limit. Optional.
where_document: A WhereDocument type dict used to filter by the documents. E.g. `{$contains: {"text": "hello"}}`. Optional.
include: A list of what to include in the results. Can contain `"embeddings"`, `"metadatas"`, `"documents"`. Ids are always included. Defaults to `["metadatas", "documents"]`. Optional.
Returns:
GetResult: A GetResult object containing the results.
"""
(
valid_ids,
valid_where,
valid_where_document,
valid_include,
) = self._validate_and_prepare_get_request(ids, where, where_document, include)
get_results = await self._client._get(
self.id,
valid_ids,
valid_where,
None,
limit,
offset,
where_document=valid_where_document,
include=valid_include,
)
return self._transform_get_response(get_results, valid_include)
async def peek(self, limit: int = 10) -> GetResult:
"""Get the first few results in the database up to limit
Args:
limit: The number of results to return.
Returns:
GetResult: A GetResult object containing the results.
"""
return self._transform_peek_response(await self._client._peek(self.id, limit))
async def query(
self,
query_embeddings: Optional[
Union[
OneOrMany[Embedding],
OneOrMany[np.ndarray],
]
] = None,
query_texts: Optional[OneOrMany[Document]] = None,
query_images: Optional[OneOrMany[Image]] = None,
query_uris: Optional[OneOrMany[URI]] = None,
n_results: int = 10,
where: Optional[Where] = None,
where_document: Optional[WhereDocument] = None,
include: Include = ["metadatas", "documents", "distances"],
) -> QueryResult:
"""Get the n_results nearest neighbor embeddings for provided query_embeddings or query_texts.
Args:
            query_embeddings: The embeddings to get the closest neighbors of. Optional.
            query_texts: The document texts to get the closest neighbors of. Optional.
            query_images: The images to get the closest neighbors of. Optional.
            query_uris: The URIs to be used with data loader. Optional.
            n_results: The number of neighbors to return for each query_embedding or query_texts. Optional.
            where: A Where type dict used to filter results by. E.g. `{"$and": [{"color": "red"}, {"price": {"$gte": 4.20}}]}`. Optional.
where_document: A WhereDocument type dict used to filter by the documents. E.g. `{$contains: {"text": "hello"}}`. Optional.
include: A list of what to include in the results. Can contain `"embeddings"`, `"metadatas"`, `"documents"`, `"distances"`. Ids are always included. Defaults to `["metadatas", "documents", "distances"]`. Optional.
Returns:
QueryResult: A QueryResult object containing the results.
Raises:
ValueError: If you don't provide either query_embeddings, query_texts, or query_images
ValueError: If you provide both query_embeddings and query_texts
ValueError: If you provide both query_embeddings and query_images
ValueError: If you provide both query_texts and query_images
"""
(
valid_query_embeddings,
valid_n_results,
valid_where,
valid_where_document,
) = self._validate_and_prepare_query_request(
query_embeddings,
query_texts,
query_images,
query_uris,
n_results,
where,
where_document,
include,
)
query_results = await self._client._query(
collection_id=self.id,
query_embeddings=valid_query_embeddings,
n_results=valid_n_results,
where=valid_where,
where_document=valid_where_document,
include=include,
)
return self._transform_query_response(query_results, include)
async def modify(
self, name: Optional[str] = None, metadata: Optional[CollectionMetadata] = None
) -> None:
"""Modify the collection name or metadata
Args:
name: The updated name for the collection. Optional.
metadata: The updated metadata for the collection. Optional.
Returns:
None
"""
self._validate_modify_request(metadata)
# Note there is a race condition here where the metadata can be updated
# but another thread sees the cached local metadata.
# TODO: fixme
await self._client._modify(id=self.id, new_name=name, new_metadata=metadata)
self._update_model_after_modify_success(name, metadata)
async def update(
self,
ids: OneOrMany[ID],
embeddings: Optional[
Union[
OneOrMany[Embedding],
OneOrMany[np.ndarray],
]
] = None,
metadatas: Optional[OneOrMany[Metadata]] = None,
documents: Optional[OneOrMany[Document]] = None,
images: Optional[OneOrMany[Image]] = None,
uris: Optional[OneOrMany[URI]] = None,
) -> None:
"""Update the embeddings, metadatas or documents for provided ids.
Args:
ids: The ids of the embeddings to update
embeddings: The embeddings to update. If None, embeddings will be computed based on the documents or images using the embedding_function set for the Collection. Optional.
metadatas: The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional.
documents: The documents to associate with the embeddings. Optional.
images: The images to associate with the embeddings. Optional.
Returns:
None
"""
(
ids,
embeddings,
metadatas,
documents,
uris,
) = self._validate_and_prepare_update_request(
ids, embeddings, metadatas, documents, images, uris
)
await self._client._update(self.id, ids, embeddings, metadatas, documents, uris)
| |
191592
|
from typing import TYPE_CHECKING, Optional, Union
import numpy as np
from chromadb.api.models.CollectionCommon import CollectionCommon
from chromadb.api.types import (
URI,
CollectionMetadata,
Embedding,
PyEmbedding,
Include,
Metadata,
Document,
Image,
Where,
IDs,
GetResult,
QueryResult,
ID,
OneOrMany,
WhereDocument,
)
import logging
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from chromadb.api import ServerAPI # noqa: F401
class Collection(CollectionCommon["ServerAPI"]):
def count(self) -> int:
"""The total number of embeddings added to the database
Returns:
int: The total number of embeddings added to the database
"""
return self._client._count(collection_id=self.id)
def add(
self,
ids: OneOrMany[ID],
embeddings: Optional[ # type: ignore[type-arg]
Union[
OneOrMany[Embedding],
OneOrMany[PyEmbedding],
]
] = None,
metadatas: Optional[OneOrMany[Metadata]] = None,
documents: Optional[OneOrMany[Document]] = None,
images: Optional[OneOrMany[Image]] = None,
uris: Optional[OneOrMany[URI]] = None,
) -> None:
"""Add embeddings to the data store.
Args:
ids: The ids of the embeddings you wish to add
embeddings: The embeddings to add. If None, embeddings will be computed based on the documents or images using the embedding_function set for the Collection. Optional.
metadatas: The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional.
documents: The documents to associate with the embeddings. Optional.
images: The images to associate with the embeddings. Optional.
uris: The uris of the images to associate with the embeddings. Optional.
Returns:
None
Raises:
ValueError: If you don't provide either embeddings or documents
ValueError: If the length of ids, embeddings, metadatas, or documents don't match
ValueError: If you don't provide an embedding function and don't provide embeddings
ValueError: If you provide both embeddings and documents
ValueError: If you provide an id that already exists
"""
(
ids,
embeddings,
metadatas,
documents,
uris,
) = self._validate_and_prepare_embedding_set(
ids, embeddings, metadatas, documents, images, uris
)
self._client._add(ids, self.id, embeddings, metadatas, documents, uris)
def get(
self,
ids: Optional[OneOrMany[ID]] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Include = ["metadatas", "documents"],
) -> GetResult:
"""Get embeddings and their associate data from the data store. If no ids or where filter is provided returns
all embeddings up to limit starting at offset.
Args:
ids: The ids of the embeddings to get. Optional.
where: A Where type dict used to filter results by. E.g. `{"$and": [{"color" : "red"}, {"price": {"$gte": 4.20}}]}`. Optional.
limit: The number of documents to return. Optional.
offset: The offset to start returning results from. Useful for paging results with limit. Optional.
where_document: A WhereDocument type dict used to filter by the documents. E.g. `{$contains: {"text": "hello"}}`. Optional.
include: A list of what to include in the results. Can contain `"embeddings"`, `"metadatas"`, `"documents"`. Ids are always included. Defaults to `["metadatas", "documents"]`. Optional.
Returns:
GetResult: A GetResult object containing the results.
"""
(
valid_ids,
valid_where,
valid_where_document,
valid_include,
) = self._validate_and_prepare_get_request(ids, where, where_document, include)
get_results = self._client._get(
self.id,
valid_ids,
valid_where,
None,
limit,
offset,
where_document=valid_where_document,
include=valid_include,
)
return self._transform_get_response(get_results, include)
def peek(self, limit: int = 10) -> GetResult:
"""Get the first few results in the database up to limit
Args:
limit: The number of results to return.
Returns:
GetResult: A GetResult object containing the results.
"""
return self._transform_peek_response(self._client._peek(self.id, limit))
def query(
self,
query_embeddings: Optional[ # type: ignore[type-arg]
Union[
OneOrMany[Embedding],
OneOrMany[PyEmbedding],
]
] = None,
query_texts: Optional[OneOrMany[Document]] = None,
query_images: Optional[OneOrMany[Image]] = None,
query_uris: Optional[OneOrMany[URI]] = None,
n_results: int = 10,
where: Optional[Where] = None,
where_document: Optional[WhereDocument] = None,
include: Include = ["metadatas", "documents", "distances"],
) -> QueryResult:
"""Get the n_results nearest neighbor embeddings for provided query_embeddings or query_texts.
Args:
            query_embeddings: The embeddings to get the closest neighbors of. Optional.
            query_texts: The document texts to get the closest neighbors of. Optional.
            query_images: The images to get the closest neighbors of. Optional.
query_uris: The URIs to be used with data loader. Optional.
n_results: The number of neighbors to return for each query_embedding or query_texts. Optional.
where: A Where type dict used to filter results by. E.g. `{"$and": [{"color" : "red"}, {"price": {"$gte": 4.20}}]}`. Optional.
where_document: A WhereDocument type dict used to filter by the documents. E.g. `{$contains: {"text": "hello"}}`. Optional.
include: A list of what to include in the results. Can contain `"embeddings"`, `"metadatas"`, `"documents"`, `"distances"`. Ids are always included. Defaults to `["metadatas", "documents", "distances"]`. Optional.
Returns:
QueryResult: A QueryResult object containing the results.
Raises:
ValueError: If you don't provide either query_embeddings, query_texts, or query_images
ValueError: If you provide both query_embeddings and query_texts
ValueError: If you provide both query_embeddings and query_images
ValueError: If you provide both query_texts and query_images
"""
(
valid_query_embeddings,
valid_n_results,
valid_where,
valid_where_document,
) = self._validate_and_prepare_query_request(
query_embeddings,
query_texts,
query_images,
query_uris,
n_results,
where,
where_document,
include,
)
query_results = self._client._query(
collection_id=self.id,
query_embeddings=valid_query_embeddings,
n_results=valid_n_results,
where=valid_where,
where_document=valid_where_document,
include=include,
)
return self._transform_query_response(query_results, include)
def modify(
self, name: Optional[str] = None, metadata: Optional[CollectionMetadata] = None
) -> None:
"""Modify the collection name or metadata
Args:
name: The updated name for the collection. Optional.
metadata: The updated metadata for the collection. Optional.
Returns:
None
"""
self._validate_modify_request(metadata)
# Note there is a race condition here where the metadata can be updated
# but another thread sees the cached local metadata.
# TODO: fixme
self._client._modify(id=self.id, new_name=name, new_metadata=metadata)
self._update_model_after_modify_success(name, metadata)
| |
191752
|
use chroma_distance::DistanceFunction;
use criterion::{criterion_group, criterion_main, Criterion};
fn distance_metrics(c: &mut Criterion) {
c.bench_function("distance_metrics", |b| {
let mut x: Vec<f32> = Vec::with_capacity(786);
for _ in 0..x.capacity() {
x.push(rand::random());
}
let mut y: Vec<f32> = Vec::with_capacity(786);
for _ in 0..y.capacity() {
y.push(rand::random());
}
b.iter(|| {
let d = DistanceFunction::Cosine;
std::hint::black_box(DistanceFunction::distance(&d, &x, &y));
});
});
}
criterion_group!(benches, distance_metrics,);
criterion_main!(benches);
| |
191774
|
fn main() -> Result<(), Box<dyn std::error::Error>> {
// Tell cargo to rerun this build script if the bindings change.
println!("cargo:rerun-if-changed=bindings.cpp");
// Compile the hnswlib bindings.
cc::Build::new()
.cpp(true)
.file("bindings.cpp")
.flag("-std=c++11")
.flag("-Ofast")
.flag("-DHAVE_CXX0X")
.flag("-fPIC")
.flag("-ftree-vectorize")
.flag("-w")
.compile("bindings");
Ok(())
}
| |
192089
|
---
title: "🔍 Troubleshooting"
---
This page is a list of common gotchas or issues and how to fix them.
If you don't see your problem listed here, please also search the [Github Issues](https://github.com/chroma-core/chroma/issues).
## Using .get or .query, embeddings say `None`
This is actually not an error. Embeddings are quite large and expensive to send back. Most applications don't use the underlying embeddings, so by default Chroma does not send them back.
To send them back: add `include=["embeddings", "documents", "metadatas", "distances"]` to your query to return all information.
For example:
```python
results = collection.query(
query_texts="hello",
n_results=1,
include=["embeddings", "documents", "metadatas", "distances"],
)
```
{% note type="tip" %}
We may change `None` to something else to more clearly communicate why they were not returned.
{% /note %}
## Build error when running `pip install chromadb`
If you encounter an error like this during setup
```
Failed to build hnswlib
ERROR: Could not build wheels for hnswlib, which is required to install pyproject.toml-based projects
```
Try these few tips from the [community](https://github.com/chroma-core/chroma/issues/221):
1. If you get the error: `clang: error: the clang compiler does not support '-march=native'`, set this ENV variable, `export HNSWLIB_NO_NATIVE=1`
2. If on Mac, install/update xcode dev tools, `xcode-select --install`
3. If on Windows, try [these steps](https://github.com/chroma-core/chroma/issues/250#issuecomment-1540934224)
## SQLite
Chroma requires SQLite >= 3.35. If you run into errors because your SQLite version is too old, try the following.
1. Install the latest version of Python 3.10, sometimes lower versions of python are bundled with older versions of SQLite.
2. If you are on a Linux system, you can install pysqlite3-binary, `pip install pysqlite3-binary`, and then override the default
sqlite3 library before running Chroma with the steps [here](https://gist.github.com/defulmere/8b9695e415a44271061cc8e272f3c300) (a sketch of this override is shown after this list).
Alternatively you can compile SQLite from scratch and replace the library in your python installation with the latest version as documented [here](https://github.com/coleifer/pysqlite3#building-a-statically-linked-library).
3. If you are on Windows, you can manually download the latest version of SQLite from https://www.sqlite.org/download.html and
replace the DLL in your python installation's DLLs folder with the latest version. You can find your python installation path by running `os.path.dirname(sys.executable)` in python.
4. If you are using a Debian based Docker container, older Debian versions do not have an up to date SQLite, please use `bookworm` or higher.
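A minimal sketch of the override from step 2 (assuming `pysqlite3-binary` is already installed); run it before `chromadb` is imported:
```python
# Swap the stdlib sqlite3 module for pysqlite3 before Chroma imports it.
__import__("pysqlite3")
import sys
sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")
import chromadb
```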
## Illegal instruction (core dumped)
If you encounter an error like this and are using Docker, you may have built the library on a machine with a different CPU architecture than the one you are running it on. Try rebuilding the Docker image on the machine you are running it on.
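For example, a minimal sketch of rebuilding on the machine that will run the container (this assumes you are building from the Chroma repository's Dockerfile; the image tag is illustrative):
```bash
# Rebuild the image on the target machine, then start the server on port 8000.
docker build -t chroma-local .
docker run -p 8000:8000 chroma-local
```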
## My data directory is too large
If you were using Chroma prior to v0.5.6, you may be able to significantly shrink your database by [vacuuming it](/reference/cli#vacuuming). After vacuuming once, automatic pruning (a new feature in v0.5.6) is enabled and will keep your database size in check.
| |
192104
|
---
title: OpenAI
---
{% tabs group="code-lang" hideContent=true %}
{% tab label="Python" %}
{% /tab %}
{% tab label="Javascript" %}
{% /tab %}
{% /tabs %}
Chroma provides a convenient wrapper around OpenAI's embedding API. This embedding function runs remotely on OpenAI's servers, and requires an API key. You can get an API key by signing up for an account at [OpenAI](https://openai.com/api/).
The following OpenAI Embedding Models are supported:
- `text-embedding-ada-002`
- `text-embedding-3-small`
- `text-embedding-3-large`
{% note type="default" title="More Info" %}
Visit OpenAI Embeddings [documentation](https://platform.openai.com/docs/guides/embeddings) for more information.
{% /note %}
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
This embedding function relies on the `openai` python package, which you can install with `pip install openai`.
You can pass in an optional `model_name` argument, which lets you choose which OpenAI embeddings model to use. By default, Chroma uses `text-embedding-ada-002`.
```python
import chromadb.utils.embedding_functions as embedding_functions
openai_ef = embedding_functions.OpenAIEmbeddingFunction(
api_key="YOUR_API_KEY",
model_name="text-embedding-3-small"
)
```
To use the OpenAI embedding models on other platforms such as Azure, you can use the `api_base` and `api_type` parameters:
```python
import chromadb.utils.embedding_functions as embedding_functions
openai_ef = embedding_functions.OpenAIEmbeddingFunction(
api_key="YOUR_API_KEY",
api_base="YOUR_API_BASE_PATH",
api_type="azure",
api_version="YOUR_API_VERSION",
model_name="text-embedding-3-small"
)
```
{% /tab %}
{% tab label="Javascript" %}
You can pass in an optional `model` argument, which lets you choose which OpenAI embeddings model to use. By default, Chroma uses `text-embedding-ada-002`.
```javascript
const {OpenAIEmbeddingFunction} = require('chromadb');
const embeddingFunction = new OpenAIEmbeddingFunction({
openai_api_key: "apiKey",
model: "text-embedding-3-small"
})
// use directly (generate returns a promise)
const embeddings = await embeddingFunction.generate(["document1", "document2"])
// or attach the embedding function to a collection so .add and .query use it
const collection = await client.createCollection({
    name: "name",
    embeddingFunction: embeddingFunction
})
const existingCollection = await client.getCollection({
    name: "name",
    embeddingFunction: embeddingFunction
})
```
{% /tab %}
{% /tabs %}
| |
192109
|
---
title: 🦜️🔗 Langchain
---
## Langchain - Python
- [LangChain + Chroma](https://blog.langchain.dev/langchain-chroma/) on the LangChain blog
- [Harrison's `chroma-langchain` demo repo](https://github.com/hwchase17/chroma-langchain)
- [question answering over documents](https://github.com/hwchase17/chroma-langchain/blob/master/qa.ipynb) - ([Replit version](https://replit.com/@swyx/LangChainChromaStarter#main.py))
- [to use Chroma as a persistent database](https://github.com/hwchase17/chroma-langchain/blob/master/persistent-qa.ipynb)
- Tutorials
- [Chroma and LangChain tutorial](https://github.com/grumpyp/chroma-langchain-tutorial) - The demo showcases how to pull data from the English Wikipedia using their API. The project also demonstrates how to vectorize data in chunks and get embeddings using OpenAI embeddings model.
- [Create a Voice-based ChatGPT Clone That Can Search on the Internet and local files](https://betterprogramming.pub/how-to-create-a-voice-based-chatgpt-clone-that-can-search-on-the-internet-24d7f570ea8)
- [LangChain's Chroma Documentation](https://python.langchain.com/docs/integrations/vectorstores/chroma)
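For orientation, a minimal sketch of the pattern these resources build on (import paths vary between LangChain versions, and an OpenAI API key is assumed to be configured):
```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
# Build a Chroma-backed vector store from raw texts, then run a similarity search.
vectorstore = Chroma.from_texts(
    ["Chroma is an embedding database.", "LangChain composes LLM applications."],
    embedding=OpenAIEmbeddings(),
)
docs = vectorstore.similarity_search("What is Chroma?", k=1)
print(docs[0].page_content)
```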
## Langchain - JS
- [LangChainJS Chroma Documentation](https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/chroma)
| |
192117
|
---
title: Hugging Face
---
{% tabs group="code-lang" hideContent=true %}
{% tab label="Python" %}
{% /tab %}
{% tab label="Javascript" %}
{% /tab %}
{% /tabs %}
Chroma also provides a convenient wrapper around HuggingFace's embedding API. This embedding function runs remotely on HuggingFace's servers, and requires an API key. You can get an API key by signing up for an account at [HuggingFace](https://huggingface.co/).
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
```python
import chromadb.utils.embedding_functions as embedding_functions
huggingface_ef = embedding_functions.HuggingFaceEmbeddingFunction(
api_key="YOUR_API_KEY",
model_name="sentence-transformers/all-MiniLM-L6-v2"
)
```
You can pass in an optional `model_name` argument, which lets you choose which HuggingFace model to use. By default, Chroma uses `sentence-transformers/all-MiniLM-L6-v2`. You can see a list of all available models [here](https://huggingface.co/models).
{% /tab %}
{% tab label="Javascript" %}
{% /tab %}
{% /tabs %}
| |
192121
|
---
title: '🧬 Embeddings'
---
Embeddings are the AI-native way to represent any kind of data, making them the perfect fit for working with all kinds of AI-powered tools and algorithms. They can represent text, images, and soon audio and video. There are many options for creating embeddings, whether locally using an installed library, or by calling an API.
Chroma provides lightweight wrappers around popular embedding providers, making it easy to use them in your apps. You can set an embedding function when you create a Chroma collection, which will be used automatically, or you can call them directly yourself.
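For example, a minimal sketch of attaching an embedding function when a collection is created (the collection name and ids are illustrative):
```python
import chromadb
from chromadb.utils import embedding_functions
client = chromadb.Client()
default_ef = embedding_functions.DefaultEmbeddingFunction()
# The embedding function is stored with the collection and used by add() and query().
collection = client.create_collection(name="my_collection", embedding_function=default_ef)
collection.add(ids=["id1"], documents=["hello world"])
```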
{% special_table %}
{% /special_table %}
| | Python | JS |
|--------------|-----------|---------------|
| [OpenAI](/integrations/openai) | ✅ | ✅ |
| [Google Generative AI](/integrations/google-gemini) | ✅ | ✅ |
| [Cohere](/integrations/cohere) | ✅ | ✅ |
| [Hugging Face](/integrations/hugging-face) | ✅ | ➖ |
| [Instructor](/integrations/instructor) | ✅ | ➖ |
| [Hugging Face Embedding Server](/integrations/hugging-face-server) | ✅ | ✅ |
| [Jina AI](/integrations/jinaai) | ✅ | ✅ |
We welcome pull requests to add new Embedding Functions to the community.
***
## Default: all-MiniLM-L6-v2
By default, Chroma uses the [Sentence Transformers](https://www.sbert.net/) `all-MiniLM-L6-v2` model to create embeddings. This embedding model can create sentence and document embeddings that can be used for a wide variety of tasks. This embedding function runs locally on your machine, and may require you to download the model files (this happens automatically).
```python
from chromadb.utils import embedding_functions
default_ef = embedding_functions.DefaultEmbeddingFunction()
```
{% note type="default" %}
Embedding functions can be linked to a collection and used whenever you call `add`, `update`, `upsert` or `query`. You can also use them directly which can be handy for debugging.
```py
val = default_ef(["foo"])
```
-> [[0.05035809800028801, 0.0626462921500206, -0.061827320605516434...]]
{% /note %}
<!--
## Transformers.js
Chroma can use [Transformers.js](https://github.com/xenova/transformers.js) to create embeddings locally on the machine. Transformers uses the 'Xenova/all-MiniLM-L6-v2' model. Make sure you have installed Transformers.js by running ```npm install @xenova/transformers``` from the commandline.
```javascript
const {ChromaClient} = require('chromadb');
const client = new ChromaClient({path: "http://localhost:8000"});
const {TransformersEmbeddingFunction} = require('chromadb');
const embedder = new TransformersEmbeddingFunction();
(async () => {
// create the collection called name
const collection = await client.getOrCreateCollection({name: "name", embeddingFunction: embedder})
// add documents to the collection
await collection.add({
ids: ["id1", "id2", "id3"],
metadatas: [{"chapter": "3", "verse": "16"}, {"chapter": "3", "verse": "5"}, {"chapter": "29", "verse": "11"}],
documents: ["lorem ipsum...", "doc2", "doc3"],
})
// query the collection
const results = await collection.query({
nResults: 2,
queryTexts: ["lorem ipsum"]
})
})();
``` -->
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
## Sentence Transformers
Chroma can also use any [Sentence Transformers](https://www.sbert.net/) model to create embeddings.
```python
sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="all-MiniLM-L6-v2")
```
You can pass in an optional `model_name` argument, which lets you choose which Sentence Transformers model to use. By default, Chroma uses `all-MiniLM-L6-v2`. You can see a list of all available models [here](https://www.sbert.net/docs/pretrained_models.html).
{% /tab %}
{% tab label="Javascript" %}
{% /tab %}
{% /tabs %}
***
## Custom Embedding Functions
{% tabs group="code-lang" hideContent=true %}
{% tab label="Python" %}
{% /tab %}
{% tab label="Javascript" %}
{% /tab %}
{% /tabs %}
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
You can create your own embedding function to use with Chroma; it just needs to implement the `EmbeddingFunction` protocol.
```python
from chromadb import Documents, EmbeddingFunction, Embeddings
class MyEmbeddingFunction(EmbeddingFunction):
def __call__(self, input: Documents) -> Embeddings:
# embed the documents somehow
return embeddings
```
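For instance, a minimal sketch that delegates to a Sentence Transformers model (assuming `sentence-transformers` is installed; the model name is just an example):
```python
from chromadb import Documents, EmbeddingFunction, Embeddings
from sentence_transformers import SentenceTransformer

class MySentenceTransformerFunction(EmbeddingFunction):
    def __init__(self, model_name: str = "all-MiniLM-L6-v2"):
        self._model = SentenceTransformer(model_name)
    def __call__(self, input: Documents) -> Embeddings:
        # Encode the documents and return plain Python lists of floats.
        return self._model.encode(list(input), convert_to_numpy=True).tolist()
```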
We welcome contributions! If you create an embedding function that you think would be useful to others, please consider [submitting a pull request](https://github.com/chroma-core/chroma) to add it to Chroma's `embedding_functions` module.
{% /tab %}
{% tab label="Javascript" %}
You can create your own embedding function to use with Chroma; it just needs to implement the `EmbeddingFunction` protocol. All your class needs is a `.generate` method.
```javascript
class MyEmbeddingFunction {
private api_key: string;
constructor(api_key: string) {
this.api_key = api_key;
}
public async generate(texts: string[]): Promise<number[][]> {
// do things to turn texts into embeddings with an api_key perhaps
return embeddings;
}
}
```
{% /tab %}
{% /tabs %}
| |
192128
|
---
title: "🧪 Usage Guide"
---
{% tabs group="code-lang" hideContent=true %}
{% tab label="Python" %}
{% /tab %}
{% tab label="Javascript" %}
{% /tab %}
{% /tabs %}
---
## Initiating a persistent Chroma client
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
```python
import chromadb
```
You can configure Chroma to save and load the database from your local machine. Data will be persisted automatically and loaded on start (if it exists).
```python
client = chromadb.PersistentClient(path="/path/to/save/to")
```
The `path` is where Chroma will store its database files on disk, and load them on start.
{% /tab %}
{% tab label="Javascript" %}
```js
// CJS
const { ChromaClient } = require("chromadb");
// ESM
import { ChromaClient } from "chromadb";
```
{% note type="note" title="Connecting to the backend" %}
To connect with the JS client, you must connect to a backend running Chroma. See [Running Chroma in client-server mode](#running-chroma-in-client-server-mode) for how to do this.
{% /note %}
```js
const client = new ChromaClient();
```
{% /tab %}
{% /tabs %}
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
The client object has a few useful convenience methods.
```python
client.heartbeat() # returns a nanosecond heartbeat. Useful for making sure the client remains connected.
client.reset() # Empties and completely resets the database. ⚠️ This is destructive and not reversible.
```
{% /tab %}
{% tab label="Javascript" %}
The client object has a few useful convenience methods.
```javascript
await client.reset() // Empties and completely resets the database. ⚠️ This is destructive and not reversible.
```
{% /tab %}
{% /tabs %}
## Running Chroma in client-server mode
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
Chroma can also be configured to run in client/server mode. In this mode, the Chroma client connects to a Chroma server running in a separate process.
To start the Chroma server locally, run the following command:
```bash
chroma run --path /db_path
```
Then use the Chroma HTTP client to connect to the server:
```python
import chromadb
chroma_client = chromadb.HttpClient(host='localhost', port=8000)
```
That's it! Chroma's API will run in `client-server` mode with just this change.
---
Chroma also provides an async HTTP client. The behaviors and method signatures are identical to the synchronous client, but all methods that would block are now async. To use it, call `AsyncHttpClient` instead:
```python
import asyncio
import chromadb
async def main():
client = await chromadb.AsyncHttpClient()
collection = await client.create_collection(name="my_collection")
await collection.add(
documents=["hello world"],
ids=["id1"]
)
asyncio.run(main())
```
<!-- #### Run Chroma inside your application
To run the Chroma docker from inside your application code, create a docker-compose file or add to the existing one you have.
1. Download [`docker-compose.server.example.yml`](https://github.com/chroma-core/chroma/blob/main/docker-compose.server.example.yml) file and [`config`](https://github.com/chroma-core/chroma/tree/main/config) folder along with both the files inside from [GitHub Repo](https://github.com/chroma-core/chroma)
2. Rename `docker-compose.server.example.yml` to `docker-compose.yml`
3. Install docker on your local machine. [`Docker Engine`](https://docs.docker.com/engine/install/) or [`Docker Desktop`](https://docs.docker.com/desktop/install/)
4. Install docker compose [`Docker Compose`](https://docs.docker.com/compose/install/)
Use following command to manage Dockerized Chroma:
- __Command to Start Chroma__: `docker-compose up -d`
- __Command to Stop Chroma__: `docker-compose down`
- __Command to Stop Chroma and delete volumes__
This is distructive command. With this command volumes created earlier will be deleted along with data stored.: `docker-compose down -v` -->
#### Using the Python HTTP-only client
If you are running Chroma in client-server mode, you may not need the full Chroma library. Instead, you can use the lightweight client-only library.
In this case, you can install the `chromadb-client` package. This package is a lightweight HTTP client for the server with a minimal dependency footprint.
```bash
pip install chromadb-client
```
```python
import chromadb
# Example setup of the client to connect to your chroma server
client = chromadb.HttpClient(host='localhost', port=8000)
# Or for async usage:
async def main():
client = await chromadb.AsyncHttpClient(host='localhost', port=8000)
```
Note that the `chromadb-client` package is a subset of the full Chroma library and does not include all the dependencies. If you want to use the full Chroma library, you can install the `chromadb` package instead.
Most importantly, there is no default embedding function. If you `.add()` documents without embeddings, you must have manually specified an embedding function and installed its dependencies.
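For instance, here is a minimal sketch (the collection name is illustrative, and the `SentenceTransformerEmbeddingFunction` used here requires installing `sentence-transformers` separately):
```python
import chromadb
from chromadb.utils import embedding_functions

client = chromadb.HttpClient(host='localhost', port=8000)

# The thin client has no default embedding function, so supply one explicitly
ef = embedding_functions.SentenceTransformerEmbeddingFunction()
collection = client.get_or_create_collection(name="my_collection", embedding_function=ef)
collection.add(documents=["hello world"], ids=["id1"])
```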
{% /tab %}
{% tab label="Javascript" %}
To run Chroma in client-server mode, first install the Chroma library and CLI via PyPI:
```bash
pip install chromadb
```
Then start the Chroma server locally:
```bash
chroma run --path /db_path
```
The JS client then talks to the Chroma server backend.
```js
// CJS
const { ChromaClient } = require("chromadb");
// ESM
import { ChromaClient } from "chromadb";
const client = new ChromaClient();
```
You can also run the Chroma server in a docker container, or deployed to a cloud provider. See the [deployment docs](./deployment/docker) for more information.
{% /tab %}
{% /tabs %}
## Using co
| |
192129
|
llections
Chroma lets you manage collections of embeddings, using the `collection` primitive.
### Creating, inspecting, and deleting Collections
Chroma uses collection names in the URL, so there are a few restrictions on naming them:
- The length of the name must be between 3 and 63 characters.
- The name must start and end with a lowercase letter or a digit, and it can contain dots, dashes, and underscores in between.
- The name must not contain two consecutive dots.
- The name must not be a valid IP address.
Chroma collections are created with a name and an optional embedding function. If you supply an embedding function, you must supply it every time you get the collection.
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
```python
collection = client.create_collection(name="my_collection", embedding_function=emb_fn)
collection = client.get_collection(name="my_collection", embedding_function=emb_fn)
```
{% note type="caution" %}
If you later wish to `get_collection`, you MUST do so with the embedding function you supplied while creating the collection.
{% /note %}
The embedding function takes text as input, and performs tokenization and embedding. If no embedding function is supplied, Chroma will use [sentence transformer](https://www.sbert.net/index.html) as a default.
{% /tab %}
{% tab label="Javascript" %}
```js
// CJS
const { ChromaClient } = require("chromadb");
// ESM
import { ChromaClient } from "chromadb";
```
The JS client talks to a chroma server backend. This can run on your local computer or be easily deployed to AWS.
```js
let collection = await client.createCollection({
name: "my_collection",
embeddingFunction: emb_fn,
});
let collection2 = await client.getCollection({
name: "my_collection",
embeddingFunction: emb_fn,
});
```
{% note type="caution" %}
If you later wish to `getCollection`, you MUST do so with the embedding function you supplied while creating the collection.
{% /note %}
The embedding function takes text as input, and performs tokenization and embedding.
{% /tab %}
{% /tabs %}
You can learn more about [🧬 embedding functions](./guides/embeddings), and how to create your own.
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
Existing collections can be retrieved by name with `.get_collection`, and deleted with `.delete_collection`. You can also use `.get_or_create_collection` to get a collection if it exists, or create it if it doesn't.
```python
collection = client.get_collection(name="test") # Get a collection object from an existing collection, by name. Will raise an exception if it's not found.
collection = client.get_or_create_collection(name="test") # Get a collection object from an existing collection, by name. If it doesn't exist, create it.
client.delete_collection(name="my_collection") # Delete a collection and all associated embeddings, documents, and metadata. ⚠️ This is destructive and not reversible
```
{% /tab %}
{% tab label="Javascript" %}
Existing collections can be retrieved by name with `.getCollection`, and deleted with `.deleteCollection`.
```javascript
let collection = await client.getCollection({ name: "test" }); // Get a collection object from an existing collection, by name. Will raise an exception if it's not found.
collection = await client.getOrCreateCollection({ name: "test" }); // Get a collection object from an existing collection, by name. If it doesn't exist, create it.
await client.deleteCollection({ name: "test" }); // Delete a collection and all associated embeddings, documents, and metadata. ⚠️ This is destructive and not reversible
```
{% /tab %}
{% /tabs %}
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
Collections have a few useful convenience methods.
```python
collection.peek() # returns a list of the first 10 items in the collection
collection.count() # returns the number of items in the collection
collection.modify(name="new_name") # Rename the collection
```
{% /tab %}
{% tab label="Javascript" %}
There are a few useful convenience methods for working with Collections.
```javascript
await collection.peek(); // returns a list of the first 10 items in the collection
await collection.count(); // returns the number of items in the collection
```
{% /tab %}
{% /tabs %}
### Changing the distance function
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
`create_collection` also takes an optional `metadata` argument which can be used to customize the distance method of the embedding space by setting the value of `hnsw:space`.
```python
collection = client.create_collection(
name="collection_name",
metadata={"hnsw:space": "cosine"} # l2 is the default
)
```
{% /tab %}
{% tab label="Javascript" %}
`createCollection` also takes an optional `metadata` argument which can be used to customize the distance method of the embedding space by setting the value of `hnsw:space`
```js
let collection = await client.createCollection({
name: "collection_name",
metadata: { "hnsw:space": "cosine" },
});
```
{% /tab %}
{% /tabs %}
Valid options for `hnsw:space` are "l2", "ip", or "cosine". The **default** is "l2", which is the squared L2 distance.
{% special_table %}
{% /special_table %}
| Distance | parameter | Equation |
| ----------------- | :-------: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| Squared L2 | `l2` | {% math latexText="d = \\sum\\left(A_i-B_i\\right)^2" %}{% /math %} |
| Inner product | `ip` | {% math latexText="d = 1.0 - \\sum\\left(A_i \\times B_i\\right) " %}{% /math %} |
| Cosine similarity | `cosine` | {% math latexText="d = 1.0 - \\frac{\\sum\\left(A_i \\times B_i\\right)}{\\sqrt{\\sum\\left(A_i^2\\right)} \\cdot \\sqrt{\\sum\\left(B_i^2\\right)}}" %}{% /math %} |
### Adding data to a C
| |
192130
|
ollection
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
Add data to Chroma with `.add`.
Raw documents:
```python
collection.add(
documents=["lorem ipsum...", "doc2", "doc3", ...],
metadatas=[{"chapter": "3", "verse": "16"}, {"chapter": "3", "verse": "5"}, {"chapter": "29", "verse": "11"}, ...],
ids=["id1", "id2", "id3", ...]
)
```
{% /tab %}
{% tab label="Javascript" %}
Add data to Chroma with `.add`.
Raw documents:
```javascript
await collection.add({
ids: ["id1", "id2", "id3", ...],
metadatas: [{"chapter": "3", "verse": "16"}, {"chapter": "3", "verse": "5"}, {"chapter": "29", "verse": "11"}, ...],
documents: ["lorem ipsum...", "doc2", "doc3", ...],
})
// input order
// ids - required
// embeddings - optional
// metadata - optional
// documents - optional
```
{% /tab %}
{% /tabs %}
If Chroma is passed a list of `documents`, it will automatically tokenize and embed them with the collection's embedding function (the default will be used if none was supplied at collection creation). Chroma will also store the `documents` themselves. If the documents are too large to embed using the chosen embedding function, an exception will be raised.
Each document must have a unique associated `id`. Trying to `.add` the same ID twice will result in only the initial value being stored. An optional list of `metadata` dictionaries can be supplied for each document, to store additional information and enable filtering.
Alternatively, you can supply a list of document-associated `embeddings` directly, and Chroma will store the associated documents without embedding them itself.
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
```python
collection.add(
documents=["doc1", "doc2", "doc3", ...],
embeddings=[[1.1, 2.3, 3.2], [4.5, 6.9, 4.4], [1.1, 2.3, 3.2], ...],
metadatas=[{"chapter": "3", "verse": "16"}, {"chapter": "3", "verse": "5"}, {"chapter": "29", "verse": "11"}, ...],
ids=["id1", "id2", "id3", ...]
)
```
{% /tab %}
{% tab label="Javascript" %}
```javascript
await collection.add({
ids: ["id1", "id2", "id3", ...],
embeddings: [[1.1, 2.3, 3.2], [4.5, 6.9, 4.4], [1.1, 2.3, 3.2], ...],
metadatas: [{"chapter": "3", "verse": "16"}, {"chapter": "3", "verse": "5"}, {"chapter": "29", "verse": "11"}, ...],
documents: ["lorem ipsum...", "doc2", "doc3", ...],
})
```
{% /tab %}
{% /tabs %}
If the supplied `embeddings` are not the same dimension as the collection, an exception will be raised.
You can also store documents elsewhere, and just supply a list of `embeddings` and `metadata` to Chroma. You can use the `ids` to associate the embeddings with your documents stored elsewhere.
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
```python
collection.add(
embeddings=[[1.1, 2.3, 3.2], [4.5, 6.9, 4.4], [1.1, 2.3, 3.2], ...],
metadatas=[{"chapter": "3", "verse": "16"}, {"chapter": "3", "verse": "5"}, {"chapter": "29", "verse": "11"}, ...],
ids=["id1", "id2", "id3", ...]
)
```
{% /tab %}
{% tab label="Javascript" %}
```javascript
await collection.add({
ids: ["id1", "id2", "id3", ...],
embeddings: [[1.1, 2.3, 3.2], [4.5, 6.9, 4.4], [1.1, 2.3, 3.2], ...],
metadatas: [{"chapter": "3", "verse": "16"}, {"chapter": "3", "verse": "5"}, {"chapter": "29", "verse": "11"}, ...],
})
```
{% /tab %}
{% /tabs %}
### Querying a Collect
| |
192131
|
ion
You can query by a set of `query_embeddings`.
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
Chroma collections can be queried in a variety of ways, using the `.query` method.
```python
collection.query(
query_embeddings=[[11.1, 12.1, 13.1],[1.1, 2.3, 3.2], ...],
n_results=10,
where={"metadata_field": "is_equal_to_this"},
where_document={"$contains":"search_string"}
)
```
{% /tab %}
{% tab label="Javascript" %}
Chroma collections can be queried in a variety of ways, using the `.query` method.
```javascript
const result = await collection.query({
queryEmbeddings: [[11.1, 12.1, 13.1],[1.1, 2.3, 3.2], ...],
nResults: 10,
where: {"metadata_field": "is_equal_to_this"},
})
// input order
// queryEmbeddings - optional, exactly one of queryEmbeddings and queryTexts must be provided
// queryTexts - optional
// n_results - required
// where - optional
```
{% /tab %}
{% /tabs %}
The query will return the `n_results` closest matches to each `query_embedding`, in order.
An optional `where` filter dictionary can be supplied to filter by the `metadata` associated with each document.
Additionally, an optional `where_document` filter dictionary can be supplied to filter by contents of the document.
If the supplied `query_embeddings` are not the same dimension as the collection, an exception will be raised.
You can also query by a set of `query_texts`. Chroma will first embed each `query_text` with the collection's embedding function, and then perform the query with the generated embedding.
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
```python
collection.query(
query_texts=["doc10", "thus spake zarathustra", ...],
n_results=10,
where={"metadata_field": "is_equal_to_this"},
where_document={"$contains":"search_string"}
)
```
You can also retrieve items from a collection by `id` using `.get`.
```python
collection.get(
ids=["id1", "id2", "id3", ...],
where={"style": "style1"}
)
```
{% /tab %}
{% tab label="Javascript" %}
```javascript
await collection.query({
nResults: 10, // n_results
where: {"metadata_field": "is_equal_to_this"}, // where
queryTexts: ["doc10", "thus spake zarathustra", ...], // query_text
})
```
You can also retrieve records from a collection by `id` using `.get`.
```javascript
await collection.get( {
ids: ["id1", "id2", "id3", ...], //ids
where: {"style": "style1"} // where
})
```
{% /tab %}
{% /tabs %}
`.get` also supports the `where` and `where_document` filters. If no `ids` are supplied, it will return all items in the collection that match the `where` and `where_document` filters.
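For example, a short sketch of a filter-only `.get` (no `ids` supplied):
```python
# Return every record whose "style" metadata is "style1"
# and whose document contains "search_string"
collection.get(
    where={"style": "style1"},
    where_document={"$contains": "search_string"}
)
```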
##### Choosing which data is returned
When using `.get` or `.query`, you can use the `include` parameter to specify which data you want returned: any of `embeddings`, `documents`, `metadatas`, and, for `.query`, `distances`. By default, Chroma will return the `documents`, `metadatas`, and, in the case of `.query`, the `distances` of the results. `embeddings` are excluded by default for performance, and the `ids` are always returned. You can specify which of these you want returned by passing an array of field names to the `include` parameter of the `.query` or `.get` method. Note that embeddings will be returned as a 2-d numpy array in `.get` and a Python list of 2-d numpy arrays in `.query`.
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
```python
# Only get documents and ids
collection.get(
include=["documents"]
)
collection.query(
query_embeddings=[[11.1, 12.1, 13.1],[1.1, 2.3, 3.2], ...],
include=["documents"]
)
```
{% /tab %}
{% tab label="Javascript" %}
```javascript
// Only get documents and ids
await collection.get({
    include: ["documents"]
})
await collection.query({
    queryEmbeddings: [[11.1, 12.1, 13.1], [1.1, 2.3, 3.2], ...],
    include: ["documents"]
})
```
{% /tab %}
{% /tabs %}
### Using Where filters
Chroma supports filtering queries by `metadata` and `document` contents. The `where` filter is used to filter by `metadata`, and the `where_document` filter is used to filter by `document` contents.
##### Filtering by metadata
In order to filter on metadata, you must supply a `where` filter dictionary to the query. The dictionary must have the following structure:
```python
{
"metadata_field": {
<Operator>: <Value>
}
}
```
Filtering metadata supports the following operators:
- `$eq` - equal to (string, int, float)
- `$ne` - not equal to (string, int, float)
- `$gt` - greater than (int, float)
- `$gte` - greater than or equal to (int, float)
- `$lt` - less than (int, float)
- `$lte` - less than or equal to (int, float)
Using the `$eq` operator is equivalent to specifying the value directly in the `where` filter.
```python
{
"metadata_field": "search_string"
}
# is equivalent to
{
"metadata_field": {
"$eq": "search_string"
}
}
```
{% note type="note" %}
Where filters only search embeddings where the key exists. For example, if you query with `collection.get(where={"version": {"$ne": 1}})`, records whose metadata does not have the key `version` will not be returned.
{% /note %}
##### Filtering by document contents
In order to filter on document contents, you must supply a `where_document` filter dictionary to the query. We support two filtering keys: `$contains` and `$not_contains`. The dictionary must have the following structure:
```python
# Filtering for a search_string
{
"$contains": "search_string"
}
```
```python
# Filtering for not contains
{
"$not_contains": "search_string"
}
```
##### Using logical operators
You can also use the logical operators `$and` and `$or` to combine multiple filters.
An `$and` operator will return results that match all of the filters in the list.
```python
{
"$and": [
{
"metadata_field": {
<Operator>: <Value>
}
},
{
"metadata_field": {
<Operator>: <Value>
}
}
]
}
```
An `$or` operator will return results that match any of the filters in the list.
```python
{
"$or": [
{
"metadata_field": {
<Operator>: <Value>
}
},
{
"metadata_field": {
<Operator>: <Value>
}
}
]
}
```
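As a concrete sketch (the `category` and `price` metadata fields are purely illustrative), a combined filter is passed like any other `where` filter:
```python
# Match records whose category is "fiction" AND whose price is below 10
collection.query(
    query_texts=["space opera"],
    n_results=5,
    where={
        "$and": [
            {"category": {"$eq": "fiction"}},
            {"price": {"$lt": 10}}
        ]
    }
)
```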
##### Using inclusion operators (`$in` and `$nin`)
The following inclusion operators are supported:
- `$in` - a value is in predefined list (string, int, float, bool)
- `$nin` - a value is not in predefined list (string, int, float, bool)
An `$in` operator will return results where the metadata attribute is part of a provided list:
```json
{
"metadata_field": {
"$in": ["value1", "value2", "value3"]
}
}
```
An `$nin` operator will return results where the metadata attribute is not part of a provided list:
```json
{
"metadata_field": {
"$nin": ["value1", "value2", "value3"]
}
}
```
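For example, a sketch of using `$in` inside a `.get` call (the `style` values are illustrative):
```python
# Fetch every record whose "style" metadata is one of the listed values
collection.get(
    where={"style": {"$in": ["style1", "style2"]}}
)
```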
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
{% note type="note" title="Practical examples" %}
For additional examples and a demo of how to use the inclusion operators, please see the notebook provided [here](https://github.com/chroma-core/chroma/blob/main/examples/basic_functionality/in_not_in_filtering.ipynb)
{% /note %}
{% /tab %}
{% tab label="Javascript" %}
{% /tab %}
{% /tabs %}
### Updating data in a
| |
192132
|
collection
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
Any property of records in a collection can be updated using `.update`.
```python
collection.update(
ids=["id1", "id2", "id3", ...],
embeddings=[[1.1, 2.3, 3.2], [4.5, 6.9, 4.4], [1.1, 2.3, 3.2], ...],
metadatas=[{"chapter": "3", "verse": "16"}, {"chapter": "3", "verse": "5"}, {"chapter": "29", "verse": "11"}, ...],
documents=["doc1", "doc2", "doc3", ...],
)
```
{% /tab %}
{% tab label="Javascript" %}
Any property of records in a collection can be updated using `.update`.
```javascript
await collection.update(
{
ids: ["id1", "id2", "id3", ...],
embeddings: [[1.1, 2.3, 3.2], [4.5, 6.9, 4.4], [1.1, 2.3, 3.2], ...],
metadatas: [{"chapter": "3", "verse": "16"}, {"chapter": "3", "verse": "5"}, {"chapter": "29", "verse": "11"}, ...],
documents: ["doc1", "doc2", "doc3", ...],
},
)
```
{% /tab %}
{% /tabs %}
If an `id` is not found in the collection, an error will be logged and the update will be ignored. If `documents` are supplied without corresponding `embeddings`, the embeddings will be recomputed with the collection's embedding function.
If the supplied `embeddings` are not the same dimension as the collection, an exception will be raised.
Chroma also supports an `upsert` operation, which updates existing items, or adds them if they don't yet exist.
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
```python
collection.upsert(
ids=["id1", "id2", "id3", ...],
embeddings=[[1.1, 2.3, 3.2], [4.5, 6.9, 4.4], [1.1, 2.3, 3.2], ...],
metadatas=[{"chapter": "3", "verse": "16"}, {"chapter": "3", "verse": "5"}, {"chapter": "29", "verse": "11"}, ...],
documents=["doc1", "doc2", "doc3", ...],
)
```
{% /tab %}
{% tab label="Javascript" %}
```javascript
await collection.upsert({
ids: ["id1", "id2", "id3"],
embeddings: [
[1.1, 2.3, 3.2],
[4.5, 6.9, 4.4],
[1.1, 2.3, 3.2],
],
metadatas: [
{ chapter: "3", verse: "16" },
{ chapter: "3", verse: "5" },
{ chapter: "29", verse: "11" },
],
documents: ["doc1", "doc2", "doc3"],
});
```
{% /tab %}
{% /tabs %}
If an `id` is not present in the collection, the corresponding items will be created as per `add`. Items with existing `id`s will be updated as per `update`.
### Deleting data from a collection
Chroma supports deleting items from a collection by `id` using `.delete`. The embeddings, documents, and metadata associated with each item will be deleted.
⚠️ Naturally, this is a destructive operation, and cannot be undone.
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
```python
collection.delete(
ids=["id1", "id2", "id3",...],
where={"chapter": "20"}
)
```
{% /tab %}
{% tab label="Javascript" %}
```javascript
await collection.delete({
ids: ["id1", "id2", "id3",...], //ids
where: {"chapter": "20"} //where
})
```
{% /tab %}
{% /tabs %}
`.delete` also supports the `where` filter. If no `ids` are supplied, it will delete all items in the collection that match the `where` filter.
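For example, a short sketch of deleting by filter alone:
```python
# Delete every record from chapter "20", regardless of id
collection.delete(
    where={"chapter": "20"}
)
```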
| |
192142
|
overhaul - April 20, 2024
**If you are not using Chroma's [built-in auth system](https://docs.trychroma.com/deployment/auth), you do not need to take any action.**
This release overhauls and simplifies our authentication and authorization systems.
If you are using Chroma's built-in auth system, you will need to update your configuration and
any code you wrote to implement your own authentication or authorization providers.
This change is mostly to pay down some of Chroma's technical debt and make future changes easier,
but it also changes and simplifies user configuration.
Previously, Chroma's authentication and authorization relied on many objects with many configuration options, including:
- `chroma_server_auth_provider`
- `chroma_server_auth_configuration_provider`
- `chroma_server_auth_credentials_provider`
- `chroma_client_auth_credentials_provider`
- `chroma_client_auth_protocol_adapter`
and others.
We have consolidated these into three classes:
- `ClientAuthProvider`
- `ServerAuthenticationProvider`
- `ServerAuthorizationProvider`
`ClientAuthProvider`s are now responsible for their own configuration and credential management. Credentials can be given to them with the `chroma_client_auth_credentials` setting. The value for `chroma_client_auth_credentials` depends on the `ServerAuthenticationProvider`; for `TokenAuthenticationServerProvider` it should just be the token, and for `BasicAuthenticationServerProvider` it should be `username:password`.
`ServerAuthenticationProvider`s are responsible for turning a request's authorization information into a `UserIdentity` containing any information necessary to make an authorization decision. They are now responsible for their own configuration and credential management. Configured via the `chroma_server_authn_credentials` and `chroma_server_authn_credentials_file` settings.
`ServerAuthorizationProvider`s are responsible for turning information about the request and the `UserIdentity` which issued the request into an authorization decision. Configured via the `chroma_server_authz_config` and `chroma_server_authz_config_file` settings.
_Either `authn_credentials` or `authn_credentials_file` can be set, never both. The same applies to `authz_config` and `authz_config_file`. The value of the config (or the data in the config file) will depend on your authn and authz providers. See [here](https://github.com/chroma-core/chroma/tree/main/examples/basic_functionality/authz) for more information._
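As a sketch of what the client-side settings look like in code (using the token provider and the placeholder `test-token` credential from this guide; substitute your own provider and secret):
```python
import chromadb
from chromadb.config import Settings

client = chromadb.HttpClient(
    host="localhost",
    port=8000,
    settings=Settings(
        chroma_client_auth_provider="chromadb.auth.token_authn.TokenAuthClientProvider",
        chroma_client_auth_credentials="test-token",
    ),
)
```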
The two auth systems Chroma ships with are `Basic` and `Token`. We have a small migration guide for each.
#### Basic
If you're using `Basic` auth, your server configuration might look like:
```yaml
CHROMA_SERVER_AUTH_CREDENTIALS="admin:admin"
CHROMA_SERVER_AUTH_CREDENTIALS_FILE="./example_file"
CHROMA_SERVER_AUTH_CREDENTIALS_PROVIDER="chromadb.auth.providers.HtpasswdConfigurationServerAuthCredentialsProvider"
CHROMA_SERVER_AUTH_PROVIDER="chromadb.auth.basic.BasicAuthServerProvider"
```
_Note: Only one of `AUTH_CREDENTIALS` and `AUTH_CREDENTIALS_FILE` can be set, but this guide shows how to migrate both._
And your corresponding client configuration:
```yaml
CHROMA_CLIENT_AUTH_PROVIDER="chromadb.auth.basic.BasicAuthClientProvider"
CHROMA_CLIENT_AUTH_CREDENTIALS="admin:admin"
```
To migrate to the new server configuration, simply change it to:
```yaml
CHROMA_SERVER_AUTHN_PROVIDER="chromadb.auth.basic_authn.BasicAuthenticationServerProvider"
CHROMA_SERVER_AUTHN_CREDENTIALS="admin:admin"
CHROMA_SERVER_AUTHN_CREDENTIALS_FILE="./example_file"
```
New client configuration:
```yaml
CHROMA_CLIENT_AUTH_CREDENTIALS="admin:admin"
CHROMA_CLIENT_AUTH_PROVIDER="chromadb.auth.basic_authn.BasicAuthClientProvider"
```
#### Token
If you're using `Token` auth, your server configuration might look like:
```yaml
CHROMA_SERVER_AUTH_CREDENTIALS="test-token"
CHROMA_SERVER_AUTH_CREDENTIALS_FILE="./example_file"
CHROMA_SERVER_AUTH_CREDENTIALS_PROVIDER="chromadb.auth.token.TokenConfigServerAuthCredentialsProvider"
CHROMA_SERVER_AUTH_PROVIDER="chromadb.auth.token.TokenAuthServerProvider"
CHROMA_SERVER_AUTH_TOKEN_TRANSPORT_HEADER="AUTHORIZATION"
```
_Note: Only one of `AUTH_CREDENTIALS` and `AUTH_CREDENTIALS_FILE` can be set, but this guide shows how to migrate both._
And your corresponding client configuration:
```yaml
CHROMA_CLIENT_AUTH_PROVIDER="chromadb.auth.token.TokenAuthClientProvider"
CHROMA_CLIENT_AUTH_CREDENTIALS="test-token"
CHROMA_CLIENT_AUTH_TOKEN_TRANSPORT_HEADER="AUTHORIZATION"
```
To migrate to the new server configuration, simply change it to:
```yaml
CHROMA_SERVER_AUTHN_PROVIDER="chromadb.auth.token_authn.TokenAuthenticationServerProvider"
CHROMA_SERVER_AUTHN_CREDENTIALS="test-token"
CHROMA_SERVER_AUTHN_CREDENTIALS_FILE="./example_file"
CHROMA_AUTH_TOKEN_TRANSPORT_HEADER="AUTHORIZATION"
```
New client configuration:
```yaml
CHROMA_CLIENT_AUTH_CREDENTIALS="test-token"
CHROMA_CLIENT_AUTH_PROVIDER="chromadb.auth.token_authn.TokenAuthClientProvider"
CHROMA_AUTH_TOKEN_TRANSPORT_HEADER="AUTHORIZATION"
```
#### Reference of changed configuration values
- Overall config
- `chroma_client_auth_token_transport_header`: renamed to `chroma_auth_token_transport_header`.
- `chroma_server_auth_token_transport_header`: renamed to `chroma_auth_token_transport_header`.
- Client config
- `chroma_client_auth_credentials_provider`: deleted. Functionality is now in `chroma_client_auth_provider`.
- `chroma_client_auth_protocol_adapter`: deleted. Functionality is now in `chroma_client_auth_provider`.
- `chroma_client_auth_credentials_file`: deleted. Functionality is now in `chroma_client_auth_credentials`.
- These changes also apply to the Typescript client.
- Server authn
- `chroma_server_auth_provider`: Renamed to `chroma_server_authn_provider`.
- `chroma_server_auth_configuration_provider`: deleted. Functionality is now in `chroma_server_authn_provider`.
- `chroma_server_auth_credentials_provider`: deleted. Functionality is now in `chroma_server_authn_provider`.
- `chroma_server_auth_credentials_file`: renamed to `chroma_server_authn_credentials_file`.
- `chroma_server_auth_credentials`: renamed to `chroma_server_authn_credentials`.
- `chroma_server_auth_configuration_file`: renamed to `chroma_server_authn_configuration_file`.
- Server authz
- `chroma_server_authz_ignore_paths`: deleted. Functionality is now in `chroma_server_auth_ignore_paths`.
To see the full changes, you can read the [PR](https://github.com/chroma-core/chroma/pull/1970/files) or reach out to the Chroma team on [Discord](https://discord.gg/MMeYNTmh3x).
### Migration to 0.4.16 - November 7, 2023
This release adds support for multi-modal embeddings, with an accompanying change to the definitions of `EmbeddingFunction`.
This change mainly affects users who have implemented their own `EmbeddingFunction` classes. If you are using Chroma's built-in embedding functions, you do not need to take any action.
**EmbeddingFunction**
Previously, `EmbeddingFunction`s were defined as:
```python
class EmbeddingFunction(Protocol):
def __call__(self, texts: Documents) -> Embeddings:
...
```
After this update, `EmbeddingFunction`s are defined as:
```python
Embeddable = Union[Documents, Images]
D = TypeVar("D", bound=Embeddable, contravariant=True)
class EmbeddingFunction(Protocol[D]):
def __call__(self, input: D) -> Embeddings:
...
```
The key differences are:
- `EmbeddingFunction` is now generic, and takes a type parameter `D` which is a subtype of `Embeddable`. This allows us to define `EmbeddingFunction`s which can embed multiple modalities.
- `__call__` now takes a single argument, `input`, to support data of any type `D`; the `texts` argument has been removed (see the sketch below).
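A minimal sketch of migrating a text-only custom embedding function to the new interface (the placeholder vectors stand in for whatever model you actually call):
```python
from chromadb import Documents, EmbeddingFunction, Embeddings

class MyTextEmbeddingFunction(EmbeddingFunction[Documents]):
    def __call__(self, input: Documents) -> Embeddings:
        # Before 0.4.16 this signature was: def __call__(self, texts: Documents) -> Embeddings
        # Replace the placeholder below with a real call to your embedding model
        return [[0.0, 0.0, 0.0] for _ in input]
```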
### Migr
| |
192143
|
ation from <0.4.0 to 0.4.0 - July 17, 2023
What's new in this version?
- New easy way to create clients
- Changed storage method
- `.persist()` removed, `.reset()` no longer on by default
**New Clients**
```python
### in-memory ephemeral client
# before
import chromadb
client = chromadb.Client()
# after
import chromadb
client = chromadb.EphemeralClient()
### persistent client
# before
import chromadb
from chromadb.config import Settings
client = chromadb.Client(Settings(
chroma_db_impl="duckdb+parquet",
persist_directory="/path/to/persist/directory" # Optional, defaults to .chromadb/ in the current directory
))
# after
import chromadb
client = chromadb.PersistentClient(path="/path/to/persist/directory")
### http client (to talk to server backend)
# before
import chromadb
from chromadb.config import Settings
client = chromadb.Client(Settings(chroma_api_impl="rest",
chroma_server_host="localhost",
chroma_server_http_port="8000"
))
# after
import chromadb
client = chromadb.HttpClient(host="localhost", port="8000")
```
You can also still use the underlying `.Client()` constructor. If you want to turn off telemetry, all clients support custom settings:
```python
import chromadb
from chromadb.config import Settings
client = chromadb.PersistentClient(
path="/path/to/persist/directory",
settings=Settings(anonymized_telemetry=False))
```
**New data layout**
This version of Chroma drops `duckdb` and `clickhouse` in favor of `sqlite` for metadata storage. This means migrating data over. We have created a migration CLI utility to do this.
If you upgrade to `0.4.0` and try to access data stored in the old way, you will see this error message
> You are using a deprecated configuration of Chroma. Please pip install chroma-migrate and run `chroma-migrate` to upgrade your configuration. See https://docs.trychroma.com/deployment/migration for more information or join our discord at https://discord.gg/8g5FESbj for help!
Here is how to install and use the CLI:
```bash
pip install chroma-migrate
chroma-migrate
```

If you need any help with this migration, please reach out! We are on [Discord](https://discord.com/channels/1073293645303795742/1129286514845691975) ready to help.
**Persist & Reset**
`.persist()` was in the old version of Chroma because writes were only flushed when forced to. Chroma `0.4.0` saves all writes to disk instantly and so `persist` is no longer needed.
`.reset()`, which resets the entire database, used to be enabled by default, which felt wrong. `0.4.0` has it disabled by default. You can enable it again by passing `allow_reset=True` to a `Settings` object. For example:
```python
import chromadb
from chromadb.config import Settings
client = chromadb.PersistentClient(path="./path/to/chroma", settings=Settings(allow_reset=True))
```
| |
192180
|
---
title: "📖 API Cheatsheet"
---
# 📖 API Cheatsheet
{% note type="note" %}
This is a quick cheatsheet of the API. For full API docs, refer to the JS and Python docs in the sidebar.
{% /note %}
---
{% tabs group="code-lang" hideContent=true %}
{% tab label="Python" %}
{% /tab %}
{% tab label="Javascript" %}
{% /tab %}
{% /tabs %}
---
{% tabs group="code-lang" hideTabs=true %}
{% tab label="Python" %}
## Initialize client - Python
### In-memory chroma
```python
import chromadb
client = chromadb.Client()
```
### In-memory chroma with saving/loading to disk
In this mode, Chroma will persist data between sessions. On load, it will load the data in the directory you specify; as you add data, it will save to that directory.
```python
import chromadb
client = chromadb.PersistentClient(path="/path/to/data")
```
### Run chroma just as a client to talk to a backend service
You can run a standalone Chroma server using the Chroma command line. Run `chroma run --path /db_path` to start a server.
Then update your client initialization and use the API the same way as before.
```python
import chromadb
chroma_client = chromadb.HttpClient(host="localhost", port=8000)
```
## Methods on Client
### Methods related to Collections
{% note type="note" title="Collection naming" %}
Collections are similar to AWS s3 buckets in their naming requirements because they are used in URLs in the REST API. Here's the [full list](/usage-guide#creating-inspecting-and-deleting-collections).
{% /note %}
```python
# list all collections
client.list_collections()
# make a new collection
collection = client.create_collection("testname")
# get an existing collection
collection = client.get_collection("testname")
# get a collection or create if it doesn't exist already
collection = client.get_or_create_collection("testname")
# delete a collection
client.delete_collection("testname")
```
### Utility methods
```python
# resets entire database - this *can't* be undone!
client.reset()
# returns timestamp to check if service is up
client.heartbeat()
```
## Methods on Collection
```python
# change the name or metadata on a collection
collection.modify(name="testname2")
# get the number of items in a collection
collection.count()
# add new items to a collection
# either one at a time
collection.add(
embeddings=[1.5, 2.9, 3.4],
metadatas={"uri": "img9.png", "style": "style1"},
documents="doc1000101",
ids="uri9",
)
# or many, up to 100k+!
collection.add(
embeddings=[[1.5, 2.9, 3.4], [9.8, 2.3, 2.9]],
metadatas=[{"style": "style1"}, {"style": "style2"}],
ids=["uri9", "uri10"],
)
collection.add(
documents=["doc1000101", "doc288822"],
metadatas=[{"style": "style1"}, {"style": "style2"}],
ids=["uri9", "uri10"],
)
# update items in a collection
collection.update()
# upsert items. new items will be added, existing items will be updated.
collection.upsert(
ids=["id1", "id2", "id3", ...],
embeddings=[[1.1, 2.3, 3.2], [4.5, 6.9, 4.4], [1.1, 2.3, 3.2], ...],
metadatas=[{"chapter": "3", "verse": "16"}, {"chapter": "3", "verse": "5"}, {"chapter": "29", "verse": "11"}, ...],
documents=["doc1", "doc2", "doc3", ...],
)
# get items from a collection
collection.get()
# convenience, get first 10 items from a collection
collection.peek()
# do nearest neighbor search to find similar embeddings or documents, supports filtering
collection.query(
query_embeddings=[[1.1, 2.3, 3.2], [5.1, 4.3, 2.2]],
n_results=2,
where={"style": "style2"}
)
# delete items
collection.delete()
```
{% /tab %}
{% tab label="Javascript" %}
### Run the backend
Run `chroma run --path /db_path` to run the Chroma backend as a standalone server on your local computer.
## Initialize client - JS
```javascript
// CJS
const { ChromaClient } = require("chromadb");
// ESM
import { ChromaClient } from 'chromadb'
const client = new ChromaClient();
```
## Methods on Client
### Methods related to Collections
{% note type="note" title="Collection naming" %}
Collections are similar to AWS s3 buckets in their naming requirements because they are used in URLs in the REST API. Here's the [full list](/usage-guide#creating-inspecting-and-deleting-collections).
{% /note %}
```javascript
// list all collections
await client.listCollections();
// make a new collection
const collection = await client.createCollection({ name: "testname" });
// get an existing collection
const collection = await client.getCollection({ name: "testname" });
// delete a collection
await client.deleteCollection({ name: "testname" });
```
### Utility methods
```javascript
// resets entire database - this *can't* be undone!
await client.reset();
```
## Methods on Collection
```javascript
// get the number of items in a collection
await collection.count()
// add new items to a collection
// either one at a time
await collection.add({
ids: "id1",
embeddings: [1.5, 2.9, 3.4],
metadatas: {"source": "my_source"},
documents: "This is a document",
})
// or many, up to 100k+!
await collection.add({
ids: ["uri9", "uri10"],
embeddings: [[1.5, 2.9, 3.4], [9.8, 2.3, 2.9]],
metadatas: [{"style": "style1"}, {"style": "style2"}],
documents: ["This is a document", 'that is a document']
})
// including just documents
await collection.add({
ids: ["uri9", "uri10"],
metadatas: [{"style": "style1"}, {"style": "style2"}],
documents: ["doc1000101", "doc288822"],
})
// or use upsert, so records will be updated if they already exist
// (instead of throwing an error)
await collection.upsert({
ids: "id1",
embeddings: [1.5, 2.9, 3.4],
metadatas: {"source": "my_source"},
documents: "This is a document",
})
// get items from a collection
await collection.get()
// convenience, get first 10 items from a collection
await collection.peek()
// do nearest neighbor search to find similar embeddings or documents, supports filtering
await collection.query({
queryEmbeddings: [[1.1, 2.3, 3.2], [5.1, 4.3, 2.2]],
nResults: 2,
where: {"style": "style2"}
})
// delete items
await collection.delete()
```
{% /tab %}
{% /tabs %}
| |
192181
|
---
title: Collection
---
# Collection Objects
```python
class Collection(BaseModel)
```
# count
```python
def count() -> int
```
The total number of embeddings added to the database
**Returns**:
- `int` - The total number of embeddings added to the database
# add
```python
def add(ids: OneOrMany[ID],
embeddings: Optional[OneOrMany[Embedding]] = None,
metadatas: Optional[OneOrMany[Metadata]] = None,
documents: Optional[OneOrMany[Document]] = None) -> None
```
Add embeddings to the data store.
**Arguments**:
- `ids` - The ids of the embeddings you wish to add
- `embeddings` - The embeddings to add. If None, embeddings will be computed based on the documents using the embedding_function set for the Collection. Optional.
- `metadatas` - The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional.
- `documents` - The documents to associate with the embeddings. Optional.
**Returns**:
None
**Raises**:
- `ValueError` - If you don't provide either embeddings or documents
- `ValueError` - If the length of ids, embeddings, metadatas, or documents don't match
- `ValueError` - If you don't provide an embedding function and don't provide embeddings
- `DuplicateIDError` - If you provide an id that already exists
# get
```python
def get(ids: Optional[OneOrMany[ID]] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Include = ["metadatas", "documents"]) -> GetResult
```
Get embeddings and their associated data from the data store. If no ids or where filter is provided, returns
all embeddings up to limit starting at offset.
**Arguments**:
- `ids` - The ids of the embeddings to get. Optional.
- `where` - A Where type dict used to filter results by. E.g. `{"color" : "red", "price": 4.20}`. Optional.
- `limit` - The number of documents to return. Optional.
- `offset` - The offset to start returning results from. Useful for paging results with limit. Optional.
- `where_document` - A WhereDocument type dict used to filter by the documents. E.g. `{"$contains": "hello"}`. Optional.
- `include` - A list of what to include in the results. Can contain `"embeddings"`, `"metadatas"`, `"documents"`. Ids are always included. Defaults to `["metadatas", "documents"]`. Optional.
**Returns**:
- `GetResult` - A GetResult object containing the results.
# peek
```python
def peek(limit: int = 10) -> GetResult
```
Get the first few results in the database up to limit
**Arguments**:
- `limit` - The number of results to return.
**Returns**:
- `GetResult` - A GetResult object containing the results.
# query
```python
def query(
query_embeddings: Optional[OneOrMany[Embedding]] = None,
query_texts: Optional[OneOrMany[Document]] = None,
n_results: int = 10,
where: Optional[Where] = None,
where_document: Optional[WhereDocument] = None,
include: Include = ["metadatas", "documents",
"distances"]) -> QueryResult
```
Get the n_results nearest neighbor embeddings for provided query_embeddings or query_texts.
**Arguments**:
- `query_embeddings` - The embeddings to get the closest neighbors of. Optional.
- `query_texts` - The document texts to get the closest neighbors of. Optional.
- `n_results` - The number of neighbors to return for each query_embedding or query_texts. Optional.
- `where` - A Where type dict used to filter results by. E.g. `{"color" : "red", "price": 4.20}`. Optional.
- `where_document` - A WhereDocument type dict used to filter by the documents. E.g. `{"$contains": "hello"}`. Optional.
- `include` - A list of what to include in the results. Can contain `"embeddings"`, `"metadatas"`, `"documents"`, `"distances"`. Ids are always included. Defaults to `["metadatas", "documents", "distances"]`. Optional.
**Returns**:
- `QueryResult` - A QueryResult object containing the results.
**Raises**:
- `ValueError` - If you don't provide either query_embeddings or query_texts
- `ValueError` - If you provide both query_embeddings and query_texts
# modify
```python
def modify(name: Optional[str] = None,
metadata: Optional[CollectionMetadata] = None) -> None
```
Modify the collection name or metadata
**Arguments**:
- `name` - The updated name for the collection. Optional.
- `metadata` - The updated metadata for the collection. Optional.
**Returns**:
None
# update
```python
def update(ids: OneOrMany[ID],
embeddings: Optional[OneOrMany[Embedding]] = None,
metadatas: Optional[OneOrMany[Metadata]] = None,
documents: Optional[OneOrMany[Document]] = None) -> None
```
Update the embeddings, metadatas or documents for provided ids.
**Arguments**:
- `ids` - The ids of the embeddings to update
- `embeddings` - The embeddings to add. If None, embeddings will be computed based on the documents using the embedding_function set for the Collection. Optional.
- `metadatas` - The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional.
- `documents` - The documents to associate with the embeddings. Optional.
**Returns**:
None
# upsert
```python
def upsert(ids: OneOrMany[ID],
embeddings: Optional[OneOrMany[Embedding]] = None,
metadatas: Optional[OneOrMany[Metadata]] = None,
documents: Optional[OneOrMany[Document]] = None) -> None
```
Update the embeddings, metadatas or documents for provided ids, or create them if they don't exist.
**Arguments**:
- `ids` - The ids of the embeddings to update
- `embeddings` - The embeddings to add. If None, embeddings will be computed based on the documents using the embedding_function set for the Collection. Optional.
- `metadatas` - The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional.
- `documents` - The documents to associate with the embeddings. Optional.
**Returns**:
None
# delete
```python
def delete(ids: Optional[IDs] = None,
where: Optional[Where] = None,
where_document: Optional[WhereDocument] = None) -> None
```
Delete the embeddings based on ids and/or a where filter
**Arguments**:
- `ids` - The ids of the embeddings to delete
- `where` - A Where type dict used to filter the deletion by. E.g. `{"color" : "red", "price": 4.20}`. Optional.
- `where_document` - A WhereDocument type dict used to filter the deletion by the document content. E.g. `{"$contains": "hello"}`. Optional.
**Returns**:
None
| |
192185
|
---
title: Client
---
## configure
```python
def configure(**kwargs) -> None
```
Override Chroma's default settings, environment variables or .env files
## EphemeralClient
```python
def EphemeralClient(settings: Optional[Settings] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE) -> ClientAPI
```
Creates an in-memory instance of Chroma. This is useful for testing and
development, but not recommended for production use.
**Arguments**:
- `tenant` - The tenant to use for this client. Defaults to the default tenant.
- `database` - The database to use for this client. Defaults to the default database.
## PersistentClient
```python
def PersistentClient(path: str = "./chroma",
settings: Optional[Settings] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE) -> ClientAPI
```
Creates a persistent instance of Chroma that saves to disk. This is useful for
testing and development, but not recommended for production use.
**Arguments**:
- `path` - The directory to save Chroma's data to. Defaults to "./chroma".
- `tenant` - The tenant to use for this client. Defaults to the default tenant.
- `database` - The database to use for this client. Defaults to the default database.
## HttpClient
```python
def HttpClient(host: str = "localhost",
port: int = 8000,
ssl: bool = False,
headers: Optional[Dict[str, str]] = None,
settings: Optional[Settings] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE) -> ClientAPI
```
Creates a client that connects to a remote Chroma server. This supports
many clients connecting to the same server, and is the recommended way to
use Chroma in production.
**Arguments**:
- `host` - The hostname of the Chroma server. Defaults to "localhost".
- `port` - The port of the Chroma server. Defaults to 8000.
- `ssl` - Whether to use SSL to connect to the Chroma server. Defaults to False.
- `headers` - A dictionary of headers to send to the Chroma server. Defaults to {}.
- `settings` - A dictionary of settings to communicate with the chroma server.
- `tenant` - The tenant to use for this client. Defaults to the default tenant.
- `database` - The database to use for this client. Defaults to the default database.
## AsyncHttpClient
```python
async def AsyncHttpClient(host: str = "localhost",
port: int = 8000,
ssl: bool = False,
headers: Optional[Dict[str, str]] = None,
settings: Optional[Settings] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE) -> AsyncClientAPI
```
Creates an async client that connects to a remote Chroma server. This supports
many clients connecting to the same server, and is the recommended way to
use Chroma in production.
**Arguments**:
- `host` - The hostname of the Chroma server. Defaults to "localhost".
- `port` - The port of the Chroma server. Defaults to 8000.
- `ssl` - Whether to use SSL to connect to the Chroma server. Defaults to False.
- `headers` - A dictionary of headers to send to the Chroma server. Defaults to {}.
- `settings` - A dictionary of settings to communicate with the chroma server.
- `tenant` - The tenant to use for this client. Defaults to the default tenant.
- `database` - The database to use for this client. Defaults to the default database.
## CloudClient
```python
def CloudClient(tenant: str,
database: str,
api_key: Optional[str] = None,
settings: Optional[Settings] = None,
*,
cloud_host: str = "api.trychroma.com",
cloud_port: int = 8000,
enable_ssl: bool = True) -> ClientAPI
```
Creates a client to connect to a tenant and database on Chroma Cloud.
**Arguments**:
- `tenant` - The tenant to use for this client.
- `database` - The database to use for this client.
- `api_key` - The api key to use for this client.
## Client
```python
def Client(settings: Settings = __settings,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE) -> ClientAPI
```
Return a running chroma.API instance
**Arguments**:
- `tenant` - The tenant to use for this client. Defaults to the default tenant.
- `database` - The database to use for this client. Defaults to the default database.
## AdminClient
```python
def AdminClient(settings: Settings = Settings()) -> AdminAPI
```
Creates an admin client that can be used to create tenants and databases.
# BaseClient Methods
```python
class BaseAPI(ABC)
```
## heartbeat
```python
def heartbeat() -> int
```
Get the current time in nanoseconds since epoch.
Used to check if the server is alive.
**Returns**:
- `int` - The current time in nanoseconds since epoch
## count\_collections
```python
def count_collections() -> int
```
Count the number of collections.
**Returns**:
- `int` - The number of collections.
**Examples**:
```python
client.count_collections()
# 1
```
## delete\_collection
```python
def delete_collection(name: str) -> None
```
Delete a collection with the given name.
**Arguments**:
- `name` - The name of the collection to delete.
**Raises**:
- `ValueError` - If the collection does not exist.
**Examples**:
```python
client.delete_collection("my_collection")
```
## reset
```python
def reset() -> bool
```
Resets the database. This will delete all collections and entries.
**Returns**:
- `bool` - True if the database was reset successfully.
## get\_version
```python
def get_version() -> str
```
Get the version of Chroma.
**Returns**:
- `str` - The version of Chroma
## get\_settings
```python
def get_settings() -> Settings
```
Get the settings used to initialize.
**Returns**:
- `Settings` - The settings used to initialize.
## get\_max\_batch\_size
```python
def get_max_batch_size() -> int
```
Return the maximum number of records that can be created or mutated in a single call.
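For instance, a sketch of using this limit to batch a large insert (the client setup, collection name, and data below are illustrative):
```python
import chromadb

client = chromadb.HttpClient(host="localhost", port=8000)
collection = client.get_or_create_collection(name="big_collection")

ids = [f"id{i}" for i in range(250_000)]                  # hypothetical ids
docs = [f"document number {i}" for i in range(250_000)]   # hypothetical documents

# Never send more records per call than the server will accept
max_batch = client.get_max_batch_size()
for start in range(0, len(ids), max_batch):
    end = start + max_batch
    collection.add(ids=ids[start:end], documents=docs[start:end])
```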
| |
192193
|
import argparse
import os
from typing import List
import google.generativeai as genai
import chromadb
from chromadb.utils import embedding_functions
model = genai.GenerativeModel("gemini-pro")
def build_prompt(query: str, context: List[str]) -> str:
"""
Builds a prompt for the LLM.
This function builds a prompt for the LLM. It takes the original query,
and the returned context, and asks the model to answer the question based only
on what's in the context, not what's in its weights.
Args:
query (str): The original query.
context (List[str]): The context of the query, returned by embedding search.
Returns:
A prompt for the LLM (str).
"""
base_prompt = {
"content": "I am going to ask you a question, which I would like you to answer"
" based only on the provided context, and not any other information."
" If there is not enough information in the context to answer the question,"
' say "I am not sure", then try to make a guess.'
" Break your answer up into nicely readable paragraphs.",
}
user_prompt = {
"content": f" The question is '{query}'. Here is all the context you have:"
f'{(" ").join(context)}',
}
# combine the prompts to output a single prompt string
system = f"{base_prompt['content']} {user_prompt['content']}"
return system
def get_gemini_response(query: str, context: List[str]) -> str:
"""
Queries the Gemini API to get a response to the question.
Args:
query (str): The original query.
context (List[str]): The context of the query, returned by embedding search.
Returns:
A response to the question.
"""
response = model.generate_content(build_prompt(query, context))
return response.text
def main(
collection_name: str = "documents_collection", persist_directory: str = "."
) -> None:
# Check if the GOOGLE_API_KEY environment variable is set. Prompt the user to set it if not.
google_api_key = None
if "GOOGLE_API_KEY" not in os.environ:
gapikey = input("Please enter your Google API Key: ")
genai.configure(api_key=gapikey)
google_api_key = gapikey
else:
google_api_key = os.environ["GOOGLE_API_KEY"]
# Instantiate a persistent chroma client in the persist_directory.
# This will automatically load any previously saved collections.
# Learn more at docs.trychroma.com
client = chromadb.PersistentClient(path=persist_directory)
# create embedding function
embedding_function = embedding_functions.GoogleGenerativeAIEmbeddingFunction(
api_key=google_api_key, task_type="RETRIEVAL_QUERY"
)
# Get the collection.
collection = client.get_collection(
name=collection_name, embedding_function=embedding_function
)
# We use a simple input loop.
while True:
# Get the user's query
query = input("Query: ")
if len(query) == 0:
print("Please enter a question. Ctrl+C to Quit.\n")
continue
print("\nThinking...\n")
# Query the collection to get the 5 most relevant results
results = collection.query(
query_texts=[query], n_results=5, include=["documents", "metadatas"]
)
sources = "\n".join(
[
f"{result['filename']}: line {result['line_number']}"
for result in results["metadatas"][0] # type: ignore
]
)
# Get the response from Gemini
response = get_gemini_response(query, results["documents"][0]) # type: ignore
# Output, with sources
print(response)
print("\n")
print(f"Source documents:\n{sources}")
print("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Load documents from a directory into a Chroma collection"
)
parser.add_argument(
"--persist_directory",
type=str,
default="chroma_storage",
help="The directory where you want to store the Chroma collection",
)
parser.add_argument(
"--collection_name",
type=str,
default="documents_collection",
help="The name of the Chroma collection",
)
# Parse arguments
args = parser.parse_args()
main(
collection_name=args.collection_name,
persist_directory=args.persist_directory,
)
| |
192194
|
import os
import argparse
from tqdm import tqdm
import chromadb
from chromadb.utils import embedding_functions
import google.generativeai as genai
def main(
documents_directory: str = "documents",
collection_name: str = "documents_collection",
persist_directory: str = ".",
) -> None:
# Read all files in the data directory
documents = []
metadatas = []
files = os.listdir(documents_directory)
for filename in files:
with open(f"{documents_directory}/{filename}", "r") as file:
for line_number, line in enumerate(
tqdm((file.readlines()), desc=f"Reading {filename}"), 1
):
# Strip whitespace and append the line to the documents list
line = line.strip()
# Skip empty lines
if len(line) == 0:
continue
documents.append(line)
metadatas.append({"filename": filename, "line_number": line_number})
# Instantiate a persistent chroma client in the persist_directory.
# Learn more at docs.trychroma.com
client = chromadb.PersistentClient(path=persist_directory)
google_api_key = None
if "GOOGLE_API_KEY" not in os.environ:
gapikey = input("Please enter your Google API Key: ")
genai.configure(api_key=gapikey)
google_api_key = gapikey
else:
google_api_key = os.environ["GOOGLE_API_KEY"]
# create embedding function
embedding_function = embedding_functions.GoogleGenerativeAIEmbeddingFunction(
api_key=google_api_key
)
# If the collection already exists, we just return it. This allows us to add more
# data to an existing collection.
collection = client.get_or_create_collection(
name=collection_name, embedding_function=embedding_function
)
# Create ids from the current count
count = collection.count()
print(f"Collection already contains {count} documents")
ids = [str(i) for i in range(count, count + len(documents))]
# Load the documents in batches of 100
for i in tqdm(
range(0, len(documents), 100), desc="Adding documents", unit_scale=100
):
collection.add(
ids=ids[i : i + 100],
documents=documents[i : i + 100],
metadatas=metadatas[i : i + 100], # type: ignore
)
new_count = collection.count()
print(f"Added {new_count - count} documents")
if __name__ == "__main__":
# Read the data directory, collection name, and persist directory
parser = argparse.ArgumentParser(
description="Load documents from a directory into a Chroma collection"
)
# Add arguments
parser.add_argument(
"--data_directory",
type=str,
default="documents",
help="The directory where your text files are stored",
)
parser.add_argument(
"--collection_name",
type=str,
default="documents_collection",
help="The name of the Chroma collection",
)
parser.add_argument(
"--persist_directory",
type=str,
default="chroma_storage",
help="The directory where you want to store the Chroma collection",
)
# Parse arguments
args = parser.parse_args()
main(
documents_directory=args.data_directory,
collection_name=args.collection_name,
persist_directory=args.persist_directory,
)
| |
192210
|
import asyncio
import sys
import uuid
from pathlib import Path
import chromadb
import xai_sdk
from pypdf import PdfReader
from langchain_text_splitters import RecursiveCharacterTextSplitter, SentenceTransformersTokenTextSplitter
from tqdm import tqdm
from chromadb.utils.embedding_functions.sentence_transformer_embedding_function import \
SentenceTransformerEmbeddingFunction
def chunk_pdf(document_name: str) -> list[tuple[str, int]]:
"""
Chunks a PDF document
Args:
document_name (str): The name of the PDF document to chunk
Returns:
A list of chunks and the page number they are from
"""
file_path = f"./docs/{document_name}"
reader = PdfReader(file_path)
chunks_with_page_numbers = []
character_splitter = RecursiveCharacterTextSplitter(
separators=["\n\n", "\n", ".", " ", ""],
chunk_size=1000,
chunk_overlap=0)
token_splitter = SentenceTransformersTokenTextSplitter(chunk_overlap=0,
tokens_per_chunk=256)
for page_number, page in tqdm(enumerate(reader.pages, start=1),
total=len(reader.pages),
desc="Chunking Pages"):
page_text = page.extract_text().strip()
if not page_text:
continue
split_texts = character_splitter.split_text(page_text)
for text in split_texts:
token_split_texts = token_splitter.split_text(text)
for chunk in token_split_texts:
chunks_with_page_numbers.append((chunk, page_number))
print()
return chunks_with_page_numbers
def load_data(collection: chromadb.Collection) -> None:
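    """
    Chunks every PDF found in ./docs and adds the chunks to the collection
    Args:
        collection (chromadb.Collection): The collection to load the chunks into
    """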
pdfs = [file.name for file in Path("./docs").rglob('*.pdf')]
for file in pdfs:
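        # Skip PDFs whose chunks have already been added to the collection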
if len(collection.get(where={"document_name": file}, limit=1)["ids"]) > 0:
continue
chunks = chunk_pdf(file)
collection.add(
ids=[str(uuid.uuid4()) for _ in range(len(chunks))],
documents=[chunk[0] for chunk in chunks],
metadatas=[{"document_name": file, "page_number": chunk[1]} for chunk in chunks],
)
async def main():
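    """Builds (or loads) the local Chroma collection, then runs an interactive chat loop that answers with Grok using retrieved context."""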
chroma_client = chromadb.PersistentClient(path="./chroma_data")
embedding_function = SentenceTransformerEmbeddingFunction()
collection = chroma_client.get_or_create_collection(
name="context_collection",
embedding_function=embedding_function,
)
load_data(collection)
client = xai_sdk.Client()
conversation = client.chat.create_conversation()
print("Enter an empty message to quit.\n")
while True:
user_input = input("Human: ")
print("")
if not user_input:
return
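        # Retrieve the 5 most relevant chunks from the collection to ground the model's answer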
context = collection.query(query_texts=[user_input], include=["documents"], n_results=5)["documents"][0]
prompt_context = '\n\n'.join(context)
prompt = f"User query: {user_input}. Answer using this context:\n\n {prompt_context}"
token_stream, _ = conversation.add_response(prompt)
print("Grok: ", end="")
async for token in token_stream:
print(token, end="")
sys.stdout.flush()
print("\n")
if __name__ == "__main__":
asyncio.run(main())
| |
192217
|
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "initial_id",
"metadata": {
"collapsed": true,
"ExecuteTime": {
"end_time": "2023-08-30T12:48:38.227653Z",
"start_time": "2023-08-30T12:48:27.744069Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Number of requested results 10 is greater than number of elements in index 3, updating n_results = 3\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'ids': [['1', '3']], 'distances': [[0.28824201226234436, 1.017508625984192]], 'metadatas': [[{'author': 'john'}, {'author': 'jill'}]], 'embeddings': None, 'documents': [['Article by john', 'Article by Jill']]}\n",
"{'ids': ['1', '3'], 'embeddings': None, 'metadatas': [{'author': 'john'}, {'author': 'jill'}], 'documents': ['Article by john', 'Article by Jill']}\n"
]
}
],
"source": [
"import chromadb\n",
"\n",
"from chromadb.utils import embedding_functions\n",
"\n",
"sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name=\"all-MiniLM-L6-v2\")\n",
"\n",
"\n",
"client = chromadb.Client()\n",
"# client.heartbeat()\n",
"# client.reset()\n",
"collection = client.get_or_create_collection(\"test-where-list\", embedding_function=sentence_transformer_ef)\n",
"collection.add(documents=[\"Article by john\", \"Article by Jack\", \"Article by Jill\"],\n",
" metadatas=[{\"author\": \"john\"}, {\"author\": \"jack\"}, {\"author\": \"jill\"}], ids=[\"1\", \"2\", \"3\"])\n",
"\n",
"query = [\"Give me articles by john\"]\n",
"res = collection.query(query_texts=query,where={'author': {'$in': ['john', 'jill']}}, n_results=10)\n",
"print(res)\n",
"\n",
"res_get = collection.get(where={'author': {'$in': ['john', 'jill']}})\n",
"print(res_get)\n"
]
},
{
"cell_type": "markdown",
"source": [
"# Interactions with existing Where operators"
],
"metadata": {
"collapsed": false
},
"id": "752cef843ba2f900"
},
{
"cell_type": "code",
"execution_count": 2,
"outputs": [
{
"data": {
"text/plain": "{'ids': [['1']],\n 'distances': [[0.28824201226234436]],\n 'metadatas': [[{'article_type': 'blog', 'author': 'john'}]],\n 'embeddings': None,\n 'documents': [['Article by john']]}"
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"collection.upsert(documents=[\"Article by john\", \"Article by Jack\", \"Article by Jill\"],\n",
" metadatas=[{\"author\": \"john\",\"article_type\":\"blog\"}, {\"author\": \"jack\",\"article_type\":\"social\"}, {\"author\": \"jill\",\"article_type\":\"paper\"}], ids=[\"1\", \"2\", \"3\"])\n",
"\n",
"collection.query(query_texts=query,where={\"$and\":[{\"author\": {'$in': ['john', 'jill']}},{\"article_type\":{\"$eq\":\"blog\"}}]}, n_results=3)"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2023-08-30T12:48:49.974353Z",
"start_time": "2023-08-30T12:48:49.938985Z"
}
},
"id": "ca56cda318f9e94d"
},
{
"cell_type": "code",
"execution_count": 3,
"outputs": [
{
"data": {
"text/plain": "{'ids': [['1', '3']],\n 'distances': [[0.28824201226234436, 1.017508625984192]],\n 'metadatas': [[{'article_type': 'blog', 'author': 'john'},\n {'article_type': 'paper', 'author': 'jill'}]],\n 'embeddings': None,\n 'documents': [['Article by john', 'Article by Jill']]}"
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"collection.query(query_texts=query,where={\"$or\":[{\"author\": {'$in': ['john']}},{\"article_type\":{\"$in\":[\"paper\"]}}]}, n_results=3)"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2023-08-30T12:48:53.501431Z",
"start_time": "2023-08-30T12:48:53.481571Z"
}
},
"id": "f10e79ec90c797c1"
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [],
"metadata": {
"collapsed": false
},
"id": "d97b8b6dd96261d0"
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| |
192219
|
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Local Peristence Demo\n",
"This notebook demonstrates how to configure Chroma to persist to disk, then load it back in. "
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import chromadb"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# Create a new Chroma client with persistence enabled. \n",
"persist_directory = \"db\"\n",
"\n",
"client = chromadb.PersistentClient(path=persist_directory)\n",
"\n",
"# Create a new chroma collection\n",
"collection_name = \"peristed_collection\"\n",
"collection = client.get_or_create_collection(name=collection_name)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Add some data to the collection\n",
"collection.add(\n",
" embeddings=[\n",
" [1.1, 2.3, 3.2],\n",
" [4.5, 6.9, 4.4],\n",
" [1.1, 2.3, 3.2],\n",
" [4.5, 6.9, 4.4],\n",
" [1.1, 2.3, 3.2],\n",
" [4.5, 6.9, 4.4],\n",
" [1.1, 2.3, 3.2],\n",
" [4.5, 6.9, 4.4],\n",
" ],\n",
" metadatas=[\n",
" {\"uri\": \"img1.png\", \"style\": \"style1\"},\n",
" {\"uri\": \"img2.png\", \"style\": \"style2\"},\n",
" {\"uri\": \"img3.png\", \"style\": \"style1\"},\n",
" {\"uri\": \"img4.png\", \"style\": \"style1\"},\n",
" {\"uri\": \"img5.png\", \"style\": \"style1\"},\n",
" {\"uri\": \"img6.png\", \"style\": \"style1\"},\n",
" {\"uri\": \"img7.png\", \"style\": \"style1\"},\n",
" {\"uri\": \"img8.png\", \"style\": \"style1\"},\n",
" ],\n",
" documents=[\"doc1\", \"doc2\", \"doc3\", \"doc4\", \"doc5\", \"doc6\", \"doc7\", \"doc8\"],\n",
" ids=[\"id1\", \"id2\", \"id3\", \"id4\", \"id5\", \"id6\", \"id7\", \"id8\"],\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Create a new client with the same settings\n",
"client = chromadb.PersistentClient(path=persist_directory)\n",
"\n",
"# Load the collection\n",
"collection = client.get_collection(collection_name)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'ids': [['id1']], 'distances': [[5.1159076593562386e-15]], 'metadatas': [[{'style': 'style1', 'uri': 'img1.png'}]], 'embeddings': None, 'documents': [['doc1']]}\n"
]
}
],
"source": [
"# Query the collection\n",
"results = collection.query(\n",
" query_embeddings=[[1.1, 2.3, 3.2]],\n",
" n_results=1\n",
")\n",
"\n",
"print(results)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'ids': ['id1', 'id2', 'id3', 'id4', 'id5', 'id6', 'id7', 'id8'],\n",
" 'embeddings': [[1.100000023841858, 2.299999952316284, 3.200000047683716],\n",
" [4.5, 6.900000095367432, 4.400000095367432],\n",
" [1.100000023841858, 2.299999952316284, 3.200000047683716],\n",
" [4.5, 6.900000095367432, 4.400000095367432],\n",
" [1.100000023841858, 2.299999952316284, 3.200000047683716],\n",
" [4.5, 6.900000095367432, 4.400000095367432],\n",
" [1.100000023841858, 2.299999952316284, 3.200000047683716],\n",
" [4.5, 6.900000095367432, 4.400000095367432]],\n",
" 'metadatas': [{'style': 'style1', 'uri': 'img1.png'},\n",
" {'style': 'style2', 'uri': 'img2.png'},\n",
" {'style': 'style1', 'uri': 'img3.png'},\n",
" {'style': 'style1', 'uri': 'img4.png'},\n",
" {'style': 'style1', 'uri': 'img5.png'},\n",
" {'style': 'style1', 'uri': 'img6.png'},\n",
" {'style': 'style1', 'uri': 'img7.png'},\n",
" {'style': 'style1', 'uri': 'img8.png'}],\n",
" 'documents': ['doc1', 'doc2', 'doc3', 'doc4', 'doc5', 'doc6', 'doc7', 'doc8']}"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"collection.get(include=[\"embeddings\", \"metadatas\", \"documents\"])"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"# Clean up\n",
"! rm -rf db"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "chroma",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "88f09714c9334832bac29166716f9f6a879ee2a4ed4822c1d4120cb2393b58dd"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| |
192220
|
{
"cells": [
{
"cell_type": "markdown",
"id": "eae631e46b4c1115",
"metadata": {
"collapsed": false
},
"source": [
"# Chroma Authentication\n",
"\n",
"This tutorial aims to explain how authentication can be setup in Chroma.\n",
"\n",
"> **Important**: The concept of authentication is only applicable to Client/Server deployments. If you are using Chroma in a standalone mode, authentication is not applicable.\n",
"\n",
"## Concepts\n",
"\n",
"### Architecture Overview\n",
"\n",
"\n",
"\n",
"### Authentication Flow (Sequence)\n",
"\n",
"The authentication sequence is applied for every request. It is important to understand that credential computation or retrieval (e.g. from external auth providers) is only done once for the first authenticated request. Subsequent requests will use the same credentials.\n",
"\n",
"The authentication flow is as follows:\n",
"\n",
"\n",
"\n",
"### Preemptive Authentication\n",
"\n",
"In its current release the authentication in Chroma works in a preemptive mode. This means that the client is responsible for sending the authentication information on every request. The server will not challenge the client for authentication.\n",
"\n",
"> **Warning**: There are security risks involved with preemptive authentication in that the client might unintentionally send credentials to malicious or unintended server. When deploying authentication users are encouraged to use HTTPS (always verify server certs), to use secure providers (e.g. JWT) \n",
"> and apply good security practices.\n",
"\n",
"### Authentication Provider\n",
"\n",
"Authentication in Chroma is handled by Authentication Providers. Providers are pluggable modules that allow Chroma to abstract the authentication mechanism from the rest of the system.\n",
"\n",
"Chroma ships with the following build-in providers:\n",
"- Basic authentication\n",
"- Token-based authentication\n",
"\n",
"### Client-side Authentication\n",
"\n",
"Client-side authentication refers to the process of preparing and communicating credentials information on the client-side and sending that information the Chroma server.\n",
"\n",
"### Server-side Authentication and Authorization\n",
"\n",
"- Server-side *authentication* means validating a request's credentials and identifying the user who sent it.\n",
"- Server-side *authorization* means checking whether the identity associated with a request is allowed to make the request. This depends on authentication, since it depends on knowing the identity of the requestor.\n"
]
},
{
"cell_type": "markdown",
"id": "87d45f79aed65e21",
"metadata": {
"collapsed": false
},
"source": [
"## Configuration\n",
"\n",
"### Server Configuration\n",
"\n",
"In order for the server to provide authentication and authorization., it needs several pieces of information.\n",
"\n",
"- `CHROMA_SERVER_AUTHN_PROVIDER` - Configures which authentication provider class to use. In this case we are using `chromadb.auth.basic_authn.BasicAuthenticationServerProvider`. This authentication provider expects requests to have a header of the form `{\"Authorization\": \"Basic <base64-encoded username:password>\"}`.\n",
"- `CHROMA_SERVER_AUTHN_CREDENTIALS_FILE` - The path to the credentials file for the authentication provider. As you'll see below, we will put these credentials in `./server.htpasswd`.\n",
"- `CHROMA_SERVER_AUTHN_CREDENTIALS` - Instead of a file, you can pass authentication configuration directly. We do not recommend this but it is suitable for testing and certain deployment environments.\n",
"\n",
"### Client Configuration\n",
"\n",
"Similarly on the client side we need to provide the following configuration parameters:\n",
"\n",
"- `CHROMA_CLIENT_AUTH_PROVIDER` - Configures which authentication provider class to use. In this case we are using `chromadb.auth.basic_authn.BasicAuthClientProvider`, which is built to work well with the server-side `BasicAuthenticationServerProvider`.\n",
"- `CHROMA_CLIENT_AUTH_CREDENTIALS` - Auth credentials to be included as headers in each request to Chroma. In this case we are using `admin:admin`.\n"
]
},
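  {
   "cell_type": "markdown",
   "id": "3f1c2b7e9d0a4c55",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "#### Configuring the Client via Environment Variables (Sketch)\n",
    "\n",
    "The client-side parameters above can also be supplied as environment variables, which `Settings` reads automatically. The snippet below is a minimal, illustrative sketch (not executed in this notebook): it assumes the Basic Authentication server from the setup section below is running on `localhost:8000`, and exact behavior may vary between Chroma versions.\n",
    "\n",
    "```python\n",
    "import os\n",
    "import chromadb\n",
    "\n",
    "# Same values as the Settings-based example later in this notebook\n",
    "os.environ[\"CHROMA_CLIENT_AUTH_PROVIDER\"] = \"chromadb.auth.basic_authn.BasicAuthClientProvider\"\n",
    "os.environ[\"CHROMA_CLIENT_AUTH_CREDENTIALS\"] = \"admin:admin\"\n",
    "\n",
    "client = chromadb.HttpClient()  # defaults to localhost:8000\n",
    "client.heartbeat()  # public endpoint; works with or without authentication\n",
    "```"
   ]
  },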
{
"cell_type": "markdown",
"id": "af49d8c78f2f7347",
"metadata": {
"collapsed": false
},
"source": [
"## Setting Up\n",
"\n",
"### Before You Begin\n",
"\n",
"Make sure you have either `chromadb` or `chromadb-client` installed. You can do that by running the following command:\n",
"\n",
"```bash\n",
"pip install chromadb\n",
"```\n",
"or\n",
"\n",
"```bash\n",
"pip install chromadb-client\n",
"```\n",
"\n",
"Make sure Chroma Server is running. Use one of the following methods to start the server:\n",
"\n",
"From the command line:\n",
"\n",
"> Note: The below options will configure the server to use Basic Authentication with the username `admin` and password `admin`.\n",
"\n",
"```bash\n",
"export CHROMA_USER=admin\n",
"export CHROMA_PASSWORD=admin\n",
"docker run --rm --entrypoint htpasswd httpd:2 -Bbn ${CHROMA_USER} ${CHROMA_PASSWORD} > server.htpasswd\n",
"CHROMA_SERVER_AUTHN_CREDENTIALS_FILE=\"./server.htpasswd\" \\\n",
"CHROMA_SERVER_AUTHN_PROVIDER=\"chromadb.auth.basic_authn.BasicAuthenticationServerProvider\" \\\n",
"uvicorn chromadb.app:app --workers 1 --host 0.0.0.0 --port 8000 --proxy-headers --log-config log_config.yml\n",
"```\n",
"\n",
"With Docker Compose:\n",
"\n",
"> Note: You need to clone the git repository first and run the command from the repository root.\n",
"\n",
"```bash\n",
"export CHROMA_USER=admin\n",
"export CHROMA_PASSWORD=admin\n",
"docker run --rm --entrypoint htpasswd httpd:2 -Bbn ${CHROMA_USER} ${CHROMA_PASSWORD} > server.htpasswd\n",
"cat << EOF > .env\n",
"CHROMA_SERVER_AUTH_CREDENTIALS_FILE=\"/chroma/server.htpasswd\"\n",
"CHROMA_SERVER_AUTH_PROVIDER=\"chromadb.auth.basic_authn.BasicAuthenticationServerProvider\"\n",
"EOF\n",
"docker-compose up -d --build \n",
"```\n"
]
},
{
"cell_type": "markdown",
"id": "fc77d909233f2645",
"metadata": {
"collapsed": false
},
"source": [
"## Basic Authentication"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "8f9307acce25f672",
"metadata": {
"ExecuteTime": {
"end_time": "2023-08-22T00:33:16.354523Z",
"start_time": "2023-08-22T00:33:15.715736Z"
},
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"[]"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import chromadb\n",
"from chromadb import Settings\n",
"\n",
"client = chromadb.HttpClient(\n",
" settings=Settings(chroma_client_auth_provider=\"chromadb.auth.basic_authn.BasicAuthClientProvider\",\n",
" chroma_client_auth_credentials=\"admin:admin\"))\n",
"client.heartbeat() # this should work with or without authentication - it is a public endpoint\n",
"\n",
"client.get_version() # this should work with or without authentication - it is a public endpoint\n",
"\n",
"client.list_collections() # this is a protected endpoint and requires authentication\n",
"\n"
]
},
{
"cell_type": "markdown",
"id": "6b75f04e59cb1d42",
"metadata": {
"collapsed": false
},
"source": [
"#### Verifying Authentication (Negative Test)"
]
},
{
"cell_type": "code",
"execution_count": 3,
| |
192230
|
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
" # Alternative Embeddings\n",
" \n",
" This notebook demonstrates how to use alternative embedding functions.\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import chromadb"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"client = chromadb.Client()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from chromadb.utils import embedding_functions"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Using OpenAI Embeddings. This assumes you have the openai package installed\n",
"openai_ef = embedding_functions.OpenAIEmbeddingFunction(\n",
" api_key=\"OPENAI_KEY\", # Replace with your own OpenAI API key\n",
" model_name=\"text-embedding-ada-002\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"# Create a new chroma collection\n",
"openai_collection = client.get_or_create_collection(name=\"openai_embeddings\", embedding_function=openai_ef)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"openai_collection.add(\n",
" documents=[\"This is a document\", \"This is another document\"],\n",
" metadatas=[{\"source\": \"my_source\"}, {\"source\": \"my_source\"}],\n",
" ids=[\"id1\", \"id2\"]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'ids': [['id1', 'id2']],\n",
" 'distances': [[0.1385088860988617, 0.2017185091972351]],\n",
" 'metadatas': [[{'source': 'my_source'}, {'source': 'my_source'}]],\n",
" 'embeddings': None,\n",
" 'documents': [['This is a document', 'This is another document']]}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"results = openai_collection.query(\n",
" query_texts=[\"This is a query document\"],\n",
" n_results=2\n",
")\n",
"results"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"# Using Cohere Embeddings. This assumes you have the cohere package installed\n",
"cohere_ef = embedding_functions.CohereEmbeddingFunction(\n",
" api_key=\"COHERE_API_KEY\", \n",
" model_name=\"large\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"# Create a new chroma collection\n",
"cohere_collection = client.create_collection(name=\"cohere_embeddings\", embedding_function=cohere_ef)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"cohere_collection.add(\n",
" documents=[\"This is a document\", \"This is another document\"],\n",
" metadatas=[{\"source\": \"my_source\"}, {\"source\": \"my_source\"}],\n",
" ids=[\"id1\", \"id2\"]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'ids': [['id1', 'id2']],\n",
" 'embeddings': None,\n",
" 'documents': [['This is a document', 'This is another document']],\n",
" 'metadatas': [[{'source': 'my_source'}, {'source': 'my_source'}]],\n",
" 'distances': [[4343.1328125, 5653.28759765625]]}"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"results = cohere_collection.query(\n",
" query_texts=[\"This is a query document\"],\n",
" n_results=2\n",
")\n",
"results"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Using Instructor models. The embedding function requires the InstructorEmbedding package. \n",
"# To install it, run pip install InstructorEmbedding\n",
"\n",
"\n",
"#uses base model and cpu\n",
"instructor_ef = embedding_functions.InstructorEmbeddingFunction() \n",
"\n",
"# For task specific embeddings, add an instruction\n",
"# instructor_ef = embedding_functions.InstructorEmbeddingFunction(\n",
"# instruction=\"Represent the Wikipedia document for retrieval: \"\n",
"# )\n",
"\n",
"# Uses hkunlp/instructor-xl model and GPU\n",
"#instructor_ef = embedding_functions.InstructorEmbeddingFunction(model_name=\"hkunlp/instructor-xl\", device=\"cuda\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Create a collection with the instructor embedding function\n",
"instructor_collection = client.create_collection(name=\"instructor_embeddings\", embedding_function=instructor_ef)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"instructor_collection.add(\n",
" documents=[\"This is a document\", \"This is another document\"],\n",
" metadatas=[{\"source\": \"my_source\"}, {\"source\": \"my_source\"}],\n",
" ids=[\"id1\", \"id2\"]\n",
")\n",
"\n",
"# Adding documents with an instruction\n",
"# instructor_ef = embedding_functions.InstructorEmbeddingFunction(\n",
"# instruction=\"Represent the Science sentence: \"\n",
"# )\n",
"# instructor_collection = client.create_collection(name=\"instructor_embeddings\", embedding_function=instructor_ef)\n",
"# instructor_collection.add(documents=[\"Parton energy loss in QCD matter\"], ids=[\"id1\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"results = instructor_collection.query(\n",
" query_texts=[\"This is a query document\"],\n",
" n_results=2\n",
")\n",
"results\n",
"\n",
"# Querying with an instruction\n",
"# instructor_ef = embedding_functions.InstructorEmbeddingFunction(instruction=\"Represent the Wikipedia question for retrieving supporting documents: \")\n",
"# instructor_collection = client.get_collection(name=\"instructor_embeddings\", embedding_function=instructor_ef)\n",
"# results = instructor_collection.query(query_texts=[\"where is the food stored in a yam plant\"])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"# Using HuggingFace models. The embedding function a huggingface api_key\n",
"huggingface_ef = embedding_functions.HuggingFaceEmbeddingFunction(\n",
" api_key=\"HUGGINGFACE_API_KEY\", # Replace with your own HuggingFace API key\n",
" model_name=\"sentence-transformers/all-MiniLM-L6-v2\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
| |
192234
|
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Where Filtering\n",
"This notebook demonstrates how to use where filtering to filter the data returned from get or query."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"ExecuteTime": {
"end_time": "2023-08-11T18:29:08.687703Z",
"start_time": "2023-08-11T18:29:07.757276Z"
}
},
"outputs": [],
"source": [
"import chromadb"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"client = chromadb.Client()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Create a new chroma collection\n",
"collection_name = \"filter_example_collection\"\n",
"collection = client.create_collection(name=collection_name)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Add some data to the collection\n",
"collection.add(\n",
" embeddings=[\n",
" [1.1, 2.3, 3.2],\n",
" [4.5, 6.9, 4.4],\n",
" [1.1, 2.3, 3.2],\n",
" [4.5, 6.9, 4.4],\n",
" [1.1, 2.3, 3.2],\n",
" [4.5, 6.9, 4.4],\n",
" [1.1, 2.3, 3.2],\n",
" [4.5, 6.9, 4.4],\n",
" ],\n",
" metadatas=[\n",
" {\"status\": \"read\"},\n",
" {\"status\": \"unread\"},\n",
" {\"status\": \"read\"},\n",
" {\"status\": \"unread\"},\n",
" {\"status\": \"read\"},\n",
" {\"status\": \"unread\"},\n",
" {\"status\": \"read\"},\n",
" {\"status\": \"unread\"},\n",
" ],\n",
" documents=[\"A document that discusses domestic policy\", \"A document that discusses international affairs\", \"A document that discusses kittens\", \"A document that discusses dogs\", \"A document that discusses chocolate\", \"A document that is sixth that discusses government\", \"A document that discusses international affairs\", \"A document that discusses global affairs\"],\n",
" ids=[\"id1\", \"id2\", \"id3\", \"id4\", \"id5\", \"id6\", \"id7\", \"id8\"],\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'ids': ['id7'],\n",
" 'embeddings': None,\n",
" 'metadatas': [{'status': 'read'}],\n",
" 'documents': ['A document that discusses international affairs'],\n",
" 'uris': None,\n",
" 'data': None}"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Get documents that are read and about affairs\n",
"collection.get(where={\"status\": \"read\"}, where_document={\"$contains\": \"affairs\"})"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'ids': ['id1', 'id8'],\n",
" 'embeddings': None,\n",
" 'metadatas': [{'status': 'read'}, {'status': 'unread'}],\n",
" 'documents': ['A document that discusses domestic policy',\n",
" 'A document that discusses global affairs'],\n",
" 'uris': None,\n",
" 'data': None}"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Get documents that are about global affairs or domestic policy\n",
"collection.get(where_document={\"$or\": [{\"$contains\": \"global affairs\"}, {\"$contains\": \"domestic policy\"}]})"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'ids': [['id7', 'id2', 'id8']],\n",
" 'distances': [[16.740001678466797, 87.22000122070312, 87.22000122070312]],\n",
" 'metadatas': [[{'status': 'read'},\n",
" {'status': 'unread'},\n",
" {'status': 'unread'}]],\n",
" 'embeddings': None,\n",
" 'documents': [['A document that discusses international affairs',\n",
" 'A document that discusses international affairs',\n",
" 'A document that discusses global affairs']],\n",
" 'uris': None,\n",
" 'data': None}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Get 5 closest vectors to [0, 0, 0] that are about affairs\n",
"# Outputs 3 docs because collection only has 3 docs about affairs\n",
"collection.query(query_embeddings=[[0, 0, 0]], where_document={\"$contains\": \"affairs\"}, n_results=5)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'ids': [['id5', 'id3', 'id7', 'id8', 'id4']],\n",
" 'distances': [[16.740001678466797,\n",
" 16.740001678466797,\n",
" 16.740001678466797,\n",
" 87.22000122070312,\n",
" 87.22000122070312]],\n",
" 'metadatas': [[{'status': 'read'},\n",
" {'status': 'read'},\n",
" {'status': 'read'},\n",
" {'status': 'unread'},\n",
" {'status': 'unread'}]],\n",
" 'embeddings': None,\n",
" 'documents': [['A document that discusses chocolate',\n",
" 'A document that discusses kittens',\n",
" 'A document that discusses international affairs',\n",
" 'A document that discusses global affairs',\n",
" 'A document that discusses dogs']],\n",
" 'uris': None,\n",
" 'data': None}"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"collection.query(query_embeddings=[[0, 0, 0]], where_document={\"$not_contains\": \"domestic policy\"}, n_results=5)"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"# Where Filtering With Logical Operators\n",
"This section demonstrates how one can use the logical operators in `where` filtering.\n",
"\n",
"Chroma currently supports: `$and` and `$or`operators.\n",
"\n",
"> Note: Logical operators can be nested"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"ExecuteTime": {
"end_time": "2023-08-11T18:45:52.663345Z",
|