| code (string, 86-54.5k chars) | code_codestyle (int64, 0-371) | style_context (string, 87-49.2k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = ""
lowerCAmelCase : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
lowerCAmelCase : str = None # compression type in fsspec. ex: "gzip"
lowerCAmelCase : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : Union[str, Any] ,_snake_case : str = "" ,_snake_case : Optional[str] = None ,_snake_case : Optional[dict] = None ,**_snake_case : int ) -> Any:
"""simple docstring"""
super().__init__(self ,**_snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowercase__ : Dict = fsspec.open(
_snake_case ,mode='''rb''' ,protocol=_snake_case ,compression=self.compression ,client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' ,{} ), # To avoid issues if it was already passed.
} ,**(target_options or {}) ,)
lowercase__ : Optional[Any] = os.path.basename(self.file.path.split('''::''' )[0] )
lowercase__ : List[Any] = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowercase__ : int = None
@classmethod
def UpperCAmelCase ( cls : List[Any] ,_snake_case : str ) -> List[Any]:
"""simple docstring"""
return super()._strip_protocol(_snake_case ).lstrip('''/''' )
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
if self.dir_cache is None:
lowercase__ : Any = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowercase__ : int = {f['''name''']: f}
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : str ) -> Dict:
"""simple docstring"""
return self.file.open().read()
def UpperCAmelCase ( self : Tuple ,_snake_case : str ,_snake_case : str = "rb" ,_snake_case : Any=None ,_snake_case : Tuple=True ,_snake_case : str=None ,**_snake_case : Optional[int] ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[int] = self._strip_protocol(_snake_case )
if mode != "rb":
raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
return self.file.open()
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = "bz2"
lowerCAmelCase : List[Any] = "bz2"
lowerCAmelCase : Union[str, Any] = ".bz2"
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = "gzip"
lowerCAmelCase : Any = "gzip"
lowerCAmelCase : Optional[Any] = ".gz"
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = "lz4"
lowerCAmelCase : int = "lz4"
lowerCAmelCase : Optional[int] = ".lz4"
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = "xz"
lowerCAmelCase : Any = "xz"
lowerCAmelCase : Any = ".xz"
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : int = "zstd"
lowerCAmelCase : str = "zstd"
lowerCAmelCase : Tuple = ".zst"
def __init__( self : Optional[int] ,_snake_case : str ,_snake_case : str = "rb" ,_snake_case : Optional[str] = None ,_snake_case : Optional[dict] = None ,_snake_case : int = DEFAULT_BLOCK_SIZE ,**_snake_case : List[str] ,) -> List[str]:
"""simple docstring"""
super().__init__(
fo=_snake_case ,mode=_snake_case ,target_protocol=_snake_case ,target_options=_snake_case ,block_size=_snake_case ,**_snake_case ,)
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowercase__ : Optional[Any] = self.file.__enter__
class __A :
'''simple docstring'''
def __init__( self : List[Any] ,_snake_case : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ : List[str] = file_
def __enter__( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self : Any ,*_snake_case : Any ,**_snake_case : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
self._file.__exit__(*_snake_case ,**_snake_case )
def __iter__( self : str ) -> Union[str, Any]:
"""simple docstring"""
return iter(self._file )
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
return next(self._file )
def __getattr__( self : Any ,_snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return getattr(self._file ,_snake_case )
def fixed_enter(*_snake_case : Dict ,**_snake_case : str ):
return WrappedFile(_enter(*_snake_case ,**_snake_case ) )
lowercase__ : Union[str, Any] = fixed_enter
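A minimal usage sketch of the filesystems above, assuming they are registered with fsspec under their `protocol` names (the `datasets` library does this at import time; the URL is illustrative):

```python
import fsspec

# chained URL: fetch file.txt.gz over HTTP, decompress, and read file.txt inside
with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz", mode="rt") as f:
    first_line = f.readline()
```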
| 16
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging

if is_onnx_available():
    import onnxruntime as ort

logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
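A hedged usage sketch of the wrapper above (the repo id and input names are illustrative, not a real checkpoint):

```python
import numpy as np

# resolves either a local directory or a Hub repo containing model.onnx
model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model")  # hypothetical repo id
outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))  # returns a list of output arrays
```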
| 157
| 0
|
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate/warp the image by the affine transform that maps pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image into gray scale values
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images to a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
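A small worked example of the key call: an affine transform is fully determined by three point correspondences, and `cv2.getAffineTransform` solves the 2x3 matrix M such that dst = M @ [x, y, 1]:

```python
import cv2
import numpy as np

src = np.float32([[0, 0], [1, 0], [0, 1]])
dst = np.float32([[1, 1], [2, 1], [1, 2]])  # every point shifted by (1, 1)
M = cv2.getAffineTransform(src, dst)
# M == [[1, 0, 1],
#       [0, 1, 1]]  -> identity rotation/scale plus a (1, 1) translation
```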
| 63
|
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
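A minimal sketch of how a concrete reader builds on the base class above (the subclass name and `read` body are illustrative, not the actual `datasets` implementation):

```python
class InMemoryTextReader(AbstractDatasetReader):  # hypothetical subclass
    def read(self) -> Dataset:
        # a real reader would honor self.features, self.cache_dir,
        # self.streaming, self.num_proc, etc.
        with open(self.path_or_paths, encoding="utf-8") as f:
            lines = f.read().splitlines()
        return Dataset.from_dict({"text": lines})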
| 63
| 1
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
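The `_LazyModule` swap above defers the heavy torch-gated imports until an attribute is first accessed. A minimal sketch of the same idea using PEP 562 module-level `__getattr__` (`transformers`' `_LazyModule` is a more featureful class-based version; the mapping below is illustrative):

```python
import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}

def __getattr__(name):
    # resolve the attribute lazily on first access, then cache it in globals()
    for module_name, exported in _import_structure.items():
        if name in exported:
            value = getattr(importlib.import_module(module_name), name)
            globals()[name] = value
            return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```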
| 215
|
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head: a single linear layer from hidden state to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
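A quick shape check for the head above (the sizes are illustrative):

```python
import torch

head = ClassificationHead(class_size=5, embed_size=768)
hidden = torch.randn(2, 768)   # batch of 2 pooled hidden states
logits = head(hidden)          # -> torch.Size([2, 5])
assert logits.shape == (2, 5)
```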
| 215
| 1
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_A = get_logger()
_A = None
class _lowerCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self : Dict , UpperCamelCase : Any=None , UpperCamelCase : int=None , **UpperCamelCase : str ) -> List[Any]:
"""simple docstring"""
super().__init__(features=UpperCamelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(UpperCamelCase )}, as `jaxlib.xla_extension.Device` """
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
lowerCAmelCase__ : Dict = device if isinstance(UpperCamelCase , UpperCamelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowerCAmelCase__ : Optional[int] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
lowerCAmelCase__ : int = str(jax.devices()[0] )
lowerCAmelCase__ : Optional[Any] = jnp_array_kwargs
@staticmethod
def _lowerCAmelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
"""simple docstring"""
import jax
return {str(UpperCamelCase ): device for device in jax.devices()}
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : Dict ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(UpperCamelCase , UpperCamelCase ) and column:
if all(
isinstance(UpperCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(UpperCamelCase , axis=0 )
return column
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : int ) -> str:
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(UpperCamelCase , (str, bytes, type(UpperCamelCase )) ):
return value
elif isinstance(UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
lowerCAmelCase__ : List[str] = {}
if isinstance(UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
lowerCAmelCase__ : Union[str, Any] = {"""dtype""": jnp.intaa}
else:
lowerCAmelCase__ : List[str] = {"""dtype""": jnp.intaa}
elif isinstance(UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
lowerCAmelCase__ : List[str] = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase , PIL.Image.Image ):
lowerCAmelCase__ : Union[str, Any] = np.asarray(UpperCamelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowerCAmelCase__ : int = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(UpperCamelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : Dict ) -> List[Any]:
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(UpperCamelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(UpperCamelCase , """__array__""" ) and not isinstance(UpperCamelCase , jax.Array ):
lowerCAmelCase__ : int = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCamelCase ) for substruct in data_struct] )
elif isinstance(UpperCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCamelCase ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase )
def _lowerCAmelCase ( self : int , UpperCamelCase : dict ) -> Optional[Any]:
"""simple docstring"""
return map_nested(self._recursive_tensorize , UpperCamelCase , map_list=UpperCamelCase )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : pa.Table ) -> Mapping:
"""simple docstring"""
lowerCAmelCase__ : str = self.numpy_arrow_extractor().extract_row(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase )
return self.recursive_tensorize(UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : pa.Table ) -> "jax.Array":
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase )
lowerCAmelCase__ : List[str] = self.python_features_decoder.decode_column(UpperCamelCase , pa_table.column_names[0] )
lowerCAmelCase__ : Any = self.recursive_tensorize(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = self._consolidate(UpperCamelCase )
return column
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : pa.Table ) -> Mapping:
"""simple docstring"""
lowerCAmelCase__ : str = self.numpy_arrow_extractor().extract_batch(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = self.python_features_decoder.decode_batch(UpperCamelCase )
lowerCAmelCase__ : int = self.recursive_tensorize(UpperCamelCase )
for column_name in batch:
lowerCAmelCase__ : Any = self._consolidate(batch[column_name] )
return batch
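A sketch of how this formatter is normally reached through the public `datasets` API (assuming jax is installed; the toy data is illustrative):

```python
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]})
ds = ds.with_format("jax")  # rows/columns now come back as jax.Array
row = ds[0]                 # e.g. {"x": Array([1, 2], dtype=int32)} with x64 disabled
```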
| 212
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_A = logging.getLogger(__name__)
class _lowerCamelCase ( a_ ):
def __init__( self : List[Any] , UpperCamelCase : Dict=-1 ) -> List[Any]:
"""simple docstring"""
# in NER datasets, the last column is usually reserved for NER label
lowerCAmelCase__ : Optional[int] = label_idx
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : int = mode.value
lowerCAmelCase__ : Dict = os.path.join(UpperCamelCase , f"""{mode}.txt""" )
lowerCAmelCase__ : str = 1
lowerCAmelCase__ : List[Any] = []
with open(UpperCamelCase , encoding="""utf-8""" ) as f:
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Any = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCamelCase , labels=UpperCamelCase ) )
guid_index += 1
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Tuple = []
else:
lowerCAmelCase__ : Optional[int] = line.split(""" """ )
words.append(splits[0] )
if len(UpperCamelCase ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCamelCase , labels=UpperCamelCase ) )
return examples
def _lowerCAmelCase ( self : Any , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCamelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCAmelCase__ : Union[str, Any] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCamelCase )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _lowerCAmelCase ( self : str , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(UpperCamelCase , """r""" ) as f:
lowerCAmelCase__ : Any = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ : List[str] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _lowerCamelCase ( a_ ):
def __init__( self : Union[str, Any] ) -> Any:
"""simple docstring"""
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(UpperCamelCase , """r""" ) as f:
lowerCAmelCase__ : Any = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ : str = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class _lowerCamelCase ( a_ ):
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : Optional[Any] = mode.value
lowerCAmelCase__ : int = os.path.join(UpperCamelCase , f"""{mode}.txt""" )
lowerCAmelCase__ : Optional[int] = 1
lowerCAmelCase__ : List[Any] = []
with open(UpperCamelCase , encoding="""utf-8""" ) as f:
for sentence in parse_incr(UpperCamelCase ):
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : List[Any] = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(UpperCamelCase ) == len(UpperCamelCase )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCamelCase , labels=UpperCamelCase ) )
guid_index += 1
return examples
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = 0
for sentence in parse_incr(UpperCamelCase ):
lowerCAmelCase__ : Union[str, Any] = preds_list[example_id]
lowerCAmelCase__ : List[Any] = """"""
for token in sentence:
out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(UpperCamelCase )
example_id += 1
def _lowerCAmelCase ( self : Dict , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(UpperCamelCase , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
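For reference, a hedged sketch of the token-per-line format that `NER.read_examples_from_file` parses: the word comes first, the label sits in the last column (`self.label_idx` defaults to -1), and a blank line separates sentences:

```python
sample = """EU B-ORG
rejects O
German B-MISC
call O

Peter B-PER
Blackburn I-PER
"""
# each non-blank line -> one token; splits[0] is the word,
# splits[self.label_idx] is the label
```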
| 212
| 1
|
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = 111...1 (k ones) is divisible
    by divisor, or 0 if divisor shares a factor with 10 and can never divide one."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the least odd divisor coprime to 10 whose A(n) first exceeds limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
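A quick sanity check on `least_divisible_repunit` (Project Euler 129's A(n)): the smallest repunit divisible by 7 is R(6) = 111111 = 7 * 15873.

```python
assert least_divisible_repunit(7) == 6  # R(6) is the first repunit 7 divides
assert least_divisible_repunit(2) == 0  # even divisors can never divide a repunit
```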
| 252
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252
| 1
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
Wav2Vec2Config,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowerCAmelCase__ = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
lowerCAmelCase__ = get_tests_dir('''fixtures/vocab.json''')
lowerCAmelCase__ = get_tests_dir('''fixtures''')
class snake_case__(unittest.TestCase ):
"""simple docstring"""
lowercase_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def snake_case ( self : List[Any] ):
lowercase__ : Optional[int] = 0
def snake_case ( self : int ):
lowercase__ : Optional[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : Dict = Wav2Vec2Config()
lowercase__ : int = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , "vocab.json" ) )
lowercase__ : Any = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : int = Wav2Vec2FeatureExtractor()
lowercase__ : str = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
lowercase__ : Union[str, Any] = Wav2Vec2Processor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in tokenizer
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , "r" ) as f:
lowercase__ : Tuple = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop("processor_class" )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , "w" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
lowercase__ : Any = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : List[str] = Wav2Vec2FeatureExtractor()
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
lowercase__ : List[Any] = Wav2Vec2Processor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in feature extractor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , "r" ) as f:
lowercase__ : Tuple = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop("processor_class" )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , "w" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
lowercase__ : Any = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : Optional[int] = Wav2Vec2Config(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(SCREAMING_SNAKE_CASE )
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , "vocab.json" ) )
# create empty sample processor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , "w" ) as f:
f.write("{}" )
lowercase__ : Union[str, Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(SCREAMING_SNAKE_CASE ):
lowercase__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
lowercase__ : Any = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
lowercase__ : Tuple = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
lowercase__ : Optional[Any] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
lowercase__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def snake_case ( self : str ):
try:
AutoConfig.register("custom" , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE ):
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase__ : Tuple = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[str] = os.path.join(SCREAMING_SNAKE_CASE , "vocab.txt" )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
lowercase__ : Any = CustomTokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case ( self : List[str] ):
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = False
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = False
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """AutoFeatureExtractor"""
lowercase_ = """AutoTokenizer"""
lowercase_ = False
try:
AutoConfig.register("custom" , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local classes.
lowercase__ : str = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowercase__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowercase__ : str = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case ( self : str ):
lowercase__ : Any = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def snake_case ( self : Tuple ):
lowercase__ : Any = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class snake_case__(unittest.TestCase ):
"""simple docstring"""
lowercase_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def snake_case ( cls : Optional[int] ):
lowercase__ : int = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE )
@classmethod
def snake_case ( cls : Optional[int] ):
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def snake_case ( self : List[str] ):
lowercase__ : List[str] = Wav2Vec2Processor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE , "test-processor" ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token )
lowercase__ : List[str] = Wav2Vec2Processor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def snake_case ( self : str ):
lowercase__ : List[Any] = Wav2Vec2Processor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE , "test-processor-org" ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization="valid_org" , )
lowercase__ : Optional[Any] = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def snake_case ( self : Tuple ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase__ : int = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "vocab.txt" )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
lowercase__ : Optional[Any] = CustomTokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
lowercase__ : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(SCREAMING_SNAKE_CASE , "tokenizer_config.json" ) ) as f:
lowercase__ : Any = json.load(SCREAMING_SNAKE_CASE )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , "custom_processing.py" ) ) )
repo.push_to_hub()
lowercase__ : str = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 121
|
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow

if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 121
| 1
|
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
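A tiny worked example for `largest_product` above: in this 4x4 grid the best run of four adjacent numbers is the main diagonal, 1 * 2 * 3 * 4 = 24.

```python
grid = [
    [1, 0, 0, 0],
    [0, 2, 0, 0],
    [0, 0, 3, 0],
    [0, 0, 0, 4],
]
assert largest_product(grid) == 24
```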
| 30
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=None , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = num_channels
__lowerCamelCase = patch_size
__lowerCamelCase = num_frames
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = attention_type
__lowerCamelCase = initializer_range
__lowerCamelCase = scope
__lowerCamelCase = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__lowerCamelCase = (image_size // patch_size) ** 2
__lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__lowerCamelCase = self.num_labels
return config
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TimesformerModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TimesformerForVideoClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase )
# verify the logits shape
__lowerCamelCase = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowerCAmelCase__ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TimesformerModelTester(self )
__lowerCamelCase = ConfigTester(
self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(__UpperCAmelCase )
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
__lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(__UpperCAmelCase )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = TimesformerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
if not self.has_attentions:
pass
else:
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = True
for model_class in self.all_model_classes:
__lowerCamelCase = self.model_tester.seq_length
__lowerCamelCase = self.model_tester.num_frames
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = True
__lowerCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__lowerCamelCase = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCamelCase = True
__lowerCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__lowerCamelCase = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__lowerCamelCase = len(__UpperCAmelCase )
# Check attention is always last and order is fine
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(out_len + 1 , len(__UpperCAmelCase ) )
__lowerCamelCase = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
@slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
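

# A standalone sketch of the same checkpoint outside the test harness (assumes
# network access; pairing the checkpoint with VideoMAEImageProcessor mirrors
# default_image_processor above):
#
#     processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
#     model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
#     inputs = processor(prepare_video()[:8], return_tensors="pt")
#     with torch.no_grad():
#         predicted_class = model(**inputs).logits.argmax(-1).item()
#     print(model.config.id2label[predicted_class])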
class MaxFenwickTree:
    """Fenwick-tree-style structure for point updates and range maximum queries.

    `tree[i]` stores the maximum of `arr` over the block whose left border is
    `get_prev(i) + 1` and whose right border is `i`.
    """

    def __init__(self, size: int):
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # the block covers `index` alone, so the value is stored directly
                self.tree[index] = value
            else:
                self.tree[index] = max(value, self.tree[index])
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
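

# Illustrative usage (a sketch; `MaxFenwickTree` is the name given to the
# deobfuscated class in this file):
#
#     tree = MaxFenwickTree(5)
#     tree.update(0, 3)
#     tree.update(3, 7)
#     tree.query(0, 4)  # -> 7, the maximum over indices [0, 4)
#     tree.query(1, 3)  # -> 0, indices 1..2 were never set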
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from the PyTorch checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save the TF model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
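    # Example invocation (a sketch; converts a single BERT checkpoint and checks
    # that the TF and PyTorch outputs agree):
    #
    # python convert_pytorch_checkpoint_to_tf2.py --tf_dump_path ./tf_dump \
    #     --model_type bert --pytorch_checkpoint_path bert-base-uncased \
    #     --config_file bert-base-uncased --compare_with_pt_model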
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
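    # Example usage for summarization (a sketch; data paths are placeholders):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_summaries.txt \
    #     --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 16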
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
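# Example invocation through fire (a sketch; the tokenizer name and data
# directory are placeholders):
#
#     python save_len_file.py t5-small cnn_dm/ --max_source_length 1024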
"""simple docstring"""
def lowercase ( _snake_case : int = 200 ) ->int:
"""simple docstring"""
__snake_case : List[str] = [1, 2, 5, 10, 20, 50, 100, 200]
__snake_case : Tuple = [0] * (pence + 1)
__snake_case : List[Any] = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_snake_case , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
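    # Worked example on a smaller target: with pence=5 the relevant coins are
    # [1, 2, 5] and there are exactly four combinations (5, 2+2+1, 2+1+1+1,
    # 1+1+1+1+1), so solution(5) == 4.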
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ):
'''simple docstring'''
__snake_case : List[Any] = parent
__snake_case : List[Any] = batch_size
__snake_case : str = seq_length
__snake_case : Any = is_training
__snake_case : Any = use_input_mask
__snake_case : str = use_token_type_ids
__snake_case : Dict = use_labels
__snake_case : int = vocab_size
__snake_case : Union[str, Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Union[str, Any] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : Dict = type_vocab_size
__snake_case : List[Any] = type_sequence_label_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : str = num_labels
__snake_case : Dict = num_choices
__snake_case : Optional[int] = scope
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Dict = None
if self.use_input_mask:
__snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Tuple = None
__snake_case : List[str] = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = DistilBertModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : int = model(a_ , a_ )
__snake_case : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Optional[Any] = model(
a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = self.num_labels
__snake_case : Optional[int] = DistilBertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.num_labels
__snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.num_choices
__snake_case : Any = DistilBertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Optional[int] = model(
a_ , attention_mask=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs
__snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = DistilBertModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Tuple = DistilBertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__snake_case : List[str] = True
__snake_case : Tuple = model_class(config=a_ )
__snake_case : Any = self._prepare_for_class(a_ , a_ )
__snake_case : Dict = torch.jit.trace(
a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) )
__snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ )
loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) )
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
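

# The same checkpoint can also be exercised end to end through the pipeline API
# (a sketch; weights are downloaded on first use):
#
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
#     unmasker("Hello I'm a [MASK] model.")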
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency, dc.token_ids must be a list of lists of integers,
        # initialized only from plain Python integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another:
        # completing [1, 2, 3] would ambiguously also fulfill [1, 2].
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
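

# In practice a DisjunctiveConstraint is consumed by constrained beam search via
# `model.generate` (a sketch; `model`, `tokenizer` and `input_ids` are placeholders):
#
#     word_ids = tokenizer(["rain", "raining"], add_special_tokens=False).input_ids
#     constraint = DisjunctiveConstraint(word_ids)
#     output = model.generate(input_ids, constraints=[constraint], num_beams=4)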
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Pure-Python implementation of the SHA-1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the input to a multiple of 64 bytes, appending the bit length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded data into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the compression loop over all blocks and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
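

# Quick sanity check against the classic test vector (a sketch):
#
#     SHA1Hash(b"abc").final_hash()
#     # -> 'a9993e364706816aba3e25717850c26c9cd0d89d'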
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
def __snake_case ( self : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ = F"""\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n """.split()
with patch.object(a__ , 'argv' , a__):
run_flax_glue.main()
lowerCAmelCase__ = get_results(a__)
self.assertGreaterEqual(result['eval_accuracy'] , 0.75)
@slow
def __snake_case ( self : int):
'''simple docstring'''
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ = F"""\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n """.split()
with patch.object(a__ , 'argv' , a__):
run_clm_flax.main()
lowerCAmelCase__ = get_results(a__)
self.assertLess(result['eval_perplexity'] , 100)
@slow
def __snake_case ( self : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ = F"""\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n """.split()
with patch.object(a__ , 'argv' , a__):
run_summarization_flax.main()
lowerCAmelCase__ = get_results(a__ , split='test')
self.assertGreaterEqual(result['test_rouge1'] , 10)
self.assertGreaterEqual(result['test_rouge2'] , 2)
self.assertGreaterEqual(result['test_rougeL'] , 7)
self.assertGreaterEqual(result['test_rougeLsum'] , 7)
@slow
def __snake_case ( self : List[Any]):
'''simple docstring'''
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ = F"""\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n """.split()
with patch.object(a__ , 'argv' , a__):
run_mlm_flax.main()
lowerCAmelCase__ = get_results(a__)
self.assertLess(result['eval_perplexity'] , 42)
@slow
def __snake_case ( self : Dict):
'''simple docstring'''
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ = F"""\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n """.split()
with patch.object(a__ , 'argv' , a__):
run_ta_mlm_flax.main()
lowerCAmelCase__ = get_results(a__)
self.assertGreaterEqual(result['eval_accuracy'] , 0.42)
@slow
def __snake_case ( self : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ = 7 if get_gpu_count() > 1 else 2
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ = F"""\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n """.split()
with patch.object(a__ , 'argv' , a__):
run_flax_ner.main()
lowerCAmelCase__ = get_results(a__)
self.assertGreaterEqual(result['eval_accuracy'] , 0.75)
self.assertGreaterEqual(result['eval_f1'] , 0.3)
@slow
def __snake_case ( self : Dict):
'''simple docstring'''
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ = F"""\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n """.split()
with patch.object(a__ , 'argv' , a__):
run_qa.main()
lowerCAmelCase__ = get_results(a__)
self.assertGreaterEqual(result['eval_f1'] , 30)
self.assertGreaterEqual(result['eval_exact'] , 30)
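

# These example tests are normally driven by pytest; the @slow ones are gated
# behind the RUN_SLOW environment flag (a sketch; the file path is a placeholder):
#
#     RUN_SLOW=1 python -m pytest -rA examples/flax/test_flax_examples.py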
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
def __snake_case ( self : Tuple , lowercase__ : Any=None):
'''simple docstring'''
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('Trainer: calibration requires an calib_dataset.')
lowerCAmelCase__ = calib_dataset if calib_dataset is not None else self.calib_dataset
lowerCAmelCase__ = self._remove_unused_columns(lowercase__ , description='Calibration')
return DataLoader(
lowercase__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowercase__ , )
def __snake_case ( self : List[Any] , lowercase__ : Union[str, Any]=None):
'''simple docstring'''
lowerCAmelCase__ = self.train_dataset if calib_dataset is None else calib_dataset
lowerCAmelCase__ = self.get_calib_dataloader(lowercase__)
lowerCAmelCase__ = self.model
quant_trainer.configure_model(lowercase__ , self.quant_trainer_args , calib=lowercase__)
model.eval()
quant_trainer.enable_calibration(lowercase__)
logger.info('***** Running calibration *****')
logger.info(F""" Num examples = {self.calib_num}""")
logger.info(F""" Batch size = {calib_dataloader.batch_size}""")
for step, inputs in enumerate(lowercase__):
# Prediction step
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.prediction_step(lowercase__ , lowercase__ , prediction_loss_only=lowercase__)
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(lowercase__ , self.quant_trainer_args)
lowerCAmelCase__ = model
def __snake_case ( self : Optional[Any] , lowercase__ : List[Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : List[Any]=None , lowercase__ : str = "eval"):
'''simple docstring'''
lowerCAmelCase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCAmelCase__ = self.get_eval_dataloader(lowercase__)
lowerCAmelCase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase__ = self.compute_metrics
lowerCAmelCase__ = None
lowerCAmelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase__ = eval_loop(
lowercase__ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase__ , )
finally:
lowerCAmelCase__ = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
lowerCAmelCase__ = self.post_process_function(lowercase__ , lowercase__ , output.predictions)
lowerCAmelCase__ = self.compute_metrics(lowercase__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"""{metric_key_prefix}_"""):
lowerCAmelCase__ = metrics.pop(lowercase__)
self.log(lowercase__)
else:
lowerCAmelCase__ = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
lowerCAmelCase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase__)
return metrics
def __snake_case ( self : Optional[int] , lowercase__ : str , lowercase__ : Any , lowercase__ : List[str]=None , lowercase__ : str = "test"):
'''simple docstring'''
lowerCAmelCase__ = self.get_test_dataloader(lowercase__)
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase__ = self.compute_metrics
lowerCAmelCase__ = None
lowerCAmelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase__ = eval_loop(
lowercase__ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase__ , )
finally:
lowerCAmelCase__ = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCAmelCase__ = self.post_process_function(lowercase__ , lowercase__ , output.predictions , 'predict')
lowerCAmelCase__ = self.compute_metrics(lowercase__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"""{metric_key_prefix}_"""):
lowerCAmelCase__ = metrics.pop(lowercase__)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase__)
def __snake_case ( self : List[str] , lowercase__ : List[str]="./"):
'''simple docstring'''
lowerCAmelCase__ = self.eval_dataset
lowerCAmelCase__ = self.get_eval_dataloader(lowercase__)
lowerCAmelCase__ = next(iter(lowercase__))
# saving device - to make it consistent
lowerCAmelCase__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# convert to tuple
lowerCAmelCase__ = tuple(v.to(lowercase__) for k, v in batch.items())
logger.info('Converting model to be onnx compatible')
from pytorch_quantization.nn import TensorQuantizer
lowerCAmelCase__ = True
lowerCAmelCase__ = self.model.to(lowercase__)
model.eval()
model.float()
lowerCAmelCase__ = model.module if hasattr(lowercase__ , 'module') else model
quant_trainer.configure_model(lowercase__ , self.quant_trainer_args)
lowerCAmelCase__ = os.path.join(lowercase__ , 'model.onnx')
logger.info(F"""exporting model to {output_model_file}""")
lowerCAmelCase__ = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
lowercase__ , lowercase__ , lowercase__ , export_params=lowercase__ , opset_version=13 , do_constant_folding=lowercase__ , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=lowercase__ , )
logger.info('onnx export finished')
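        # After the export above, the graph can be sanity-checked with onnxruntime
        # (a sketch; the feed/output names follow the input_names/output_names and
        # dynamic_axes passed to torch.onnx.export):
        #
        #     import onnxruntime as ort
        #     session = ort.InferenceSession("model.onnx")
        #     start_logits, end_logits = session.run(
        #         None, {"input_ids": ids, "attention_mask": mask, "token_type_ids": tt}
        #     )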
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
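    # For reference, n_queens_solution(4) prints the two distinct 4-queens boards:
    #
    #     . Q . .          . . Q .
    #     . . . Q          Q . . .
    #     Q . . .          . . . Q
    #     . . Q .          . Q . .
    #
    # followed by "2 solutions were found."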
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
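# Minimal usage sketch (added for illustration; `Swin2SRConfig` is the class
# name restored above). The attribute_map lets generic code read
# `config.hidden_size` while the config actually stores `embed_dim`:
#   config = Swin2SRConfig()
#   assert config.hidden_size == config.embed_dim == 180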
| 121
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase :Union[str, Any] = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :Union[str, Any] = ["BeitFeatureExtractor"]
__UpperCAmelCase :int = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
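# Note (added for clarity, not part of the original file): assigning a
# _LazyModule into sys.modules means a statement like
# `from transformers.models.beit import BeitModel` only imports the heavy
# torch/flax submodules on first attribute access, keeping imports fast.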
| 357
|
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
    def test_inference_masked_lm(self):
pass
@slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 240
| 0
|
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
'''simple docstring'''
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
{
"""image""": Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
'''simple docstring'''
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
@require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
@slow
@require_torch
    def test_large_model_pt(self):
        '''simple docstring'''
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
    def test_small_model_tf(self):
'''simple docstring'''
pass
| 273
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
'''simple docstring'''
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)
@property
    def image_processor_dict(self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))
    def test_image_processor_from_dict_with_kwargs(self):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        '''simple docstring'''
        pass
    def test_call_pil(self):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
    def test_call_numpy(self):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
    def test_call_pytorch(self):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
| 20
| 0
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 370
|
def one_pence() -> int:
    '''simple docstring'''
    return 1


def two_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    '''simple docstring'''
    return two_pound(x)
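# Cross-check sketch (added for illustration, not part of the original
# solution): the chained recursion above counts the same coin partitions as a
# standard coin-change dynamic program over the British denominations.
def _solution_dp(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * pence  # ways[v] = number of ways to make v pence
    for coin in coins:
        for value in range(coin, pence + 1):
            ways[value] += ways[value - coin]
    return ways[pence]  # _solution_dp(200) should equal solution(200)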
if __name__ == "__main__":
print(solution(int(input().strip())))
| 295
| 0
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
"""simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
return config, pixel_values, labels
    def get_config(self):
"""simple docstring"""
return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone(self, config, pixel_values, labels):
        """simple docstring"""
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
    def test_inputs_embeds(self):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
    def test_model_common_attributes(self):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
    def test_feed_forward_chunking(self):
"""simple docstring"""
pass
    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self)
| 24
|
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
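# Usage sketch (added for illustration): 25 is 0b11001, which has three set
# bits, so get_set_bits_count(25) == 3. Each `number &= number - 1` step
# clears exactly the lowest set bit, which is why the loop body runs once per
# set bit instead of once per bit position.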
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """simple docstring"""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """simple docstring"""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
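# Worked example (added for illustration): for x = 1/2, y = 1/3, z = 1/6 the
# raw sum is 36/36 and gcd(36, 36) reduces it to (1, 1), i.e. exactly 1, so
# add_three(1, 2, 1, 3, 1, 6) == (1, 1).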
def solution(order: int = 35) -> int:
    """simple docstring"""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 15
|
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_string_correct(readme_md, expected_dict):
    """simple docstring"""
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 15
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Any = RoCBertTokenizer
lowerCAmelCase : List[str] = None
lowerCAmelCase : Dict = False
lowerCAmelCase : Optional[int] = True
lowerCAmelCase : int = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
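    # RoCBert tracks three aligned id spaces per token (vocabulary, glyph shape
    # and pronunciation); the toy vocab built in setUp maps every token to the
    # same index in all three, which is why the three convert_* calls above
    # return identical id lists.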
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def __lowercase ( self : Tuple ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __lowercase ( self : List[str] ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __lowercase ( self : Optional[int] ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
def __lowercase ( self : int ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : Any = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : Optional[Any] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_a : str = tokenizer_r.encode_plus(
_UpperCAmelCase ,return_attention_mask=_UpperCAmelCase ,return_token_type_ids=_UpperCAmelCase ,return_offsets_mapping=_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,)
_a : int = tokenizer_r.do_lower_case if hasattr(_UpperCAmelCase ,'do_lower_case' ) else False
_a : Tuple = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens['offset_mapping'] )
def __lowercase ( self : Dict ):
_a : Tuple = ['的', '人', '有']
_a : List[Any] = ''.join(_UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : Dict = True
_a : Optional[int] = self.tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : List[Any] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : Tuple = tokenizer_p.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Optional[Any] = tokenizer_r.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : List[Any] = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase )
_a : Optional[int] = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : int = False
_a : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : Dict = self.tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : Any = tokenizer_r.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : str = tokenizer_p.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Optional[Any] = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase )
_a : str = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
_a : int = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(_UpperCAmelCase )
]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
@slow
def __lowercase ( self : str ):
_a : int = self.tokenizer_class(self.vocab_file ,self.word_shape_file ,self.word_pronunciation_file )
_a : Optional[int] = tokenizer.encode('你好' ,add_special_tokens=_UpperCAmelCase )
_a : Any = tokenizer.encode('你是谁' ,add_special_tokens=_UpperCAmelCase )
_a : Tuple = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
_a : str = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ,_UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __lowercase ( self : List[Any] ):
_a : int = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : Optional[Any] = '你好,你是谁'
_a : Optional[int] = tokenizer.tokenize(_UpperCAmelCase )
_a : int = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
_a : List[str] = tokenizer.convert_tokens_to_shape_ids(_UpperCAmelCase )
_a : List[str] = tokenizer.convert_tokens_to_pronunciation_ids(_UpperCAmelCase )
_a : List[str] = tokenizer.prepare_for_model(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Dict = tokenizer.encode_plus(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase ,_UpperCAmelCase )
| 89
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
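# Minimal usage sketch (hypothetical values): the attribute_map above lets the
# generic config names read through to the BLOOM-specific ones, e.g.
#   config = BloomConfig(hidden_size=64, n_layer=2, n_head=8)
#   assert config.num_hidden_layers == config.n_layer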
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
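# Note the two cache shapes built in generate_dummy_inputs above: BLOOM fuses
# the batch and head dimensions, storing keys as (batch * n_head, head_dim, seq)
# and values as (batch * n_head, seq, head_dim), hence the "inverted" values
# shape flagged in the inputs property.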
| 119
| 0
|
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
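# hf_table_format renders plain pipe-delimited rows with no rule lines, so the
# resulting table stays readable when pasted inside a Slack ``` code block.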
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
| 367
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        # total resources currently allocated, summed column by column
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]
    def __available_resources(self) -> list[int]:
        # resources still free = claim vector - allocated totals
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())
    def __need(self) -> list[list[int]]:
        # remaining need of each process = maximum claim - current allocation
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]
    def __need_index_manager(self) -> dict[int, list[int]]:
        # map each process's original index to its need vector
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
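    # Minimal usage sketch with the test tables defined above (any truthy
    # keyword such as describe=True first pretty-prints the tables, then the
    # safety simulation runs):
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)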
| 251
| 0
|
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class SCREAMING_SNAKE_CASE:
"""simple docstring"""
def __init__( self : Optional[Any] , __snake_case : str , __snake_case : Tuple=3 , __snake_case : Dict=7 , __snake_case : List[str]=True , __snake_case : List[Any]=True , __snake_case : Any=False , __snake_case : Dict=True , __snake_case : Optional[Any]=99 , __snake_case : List[Any]=32 , __snake_case : List[Any]=5 , __snake_case : int=4 , __snake_case : Optional[Any]=37 , __snake_case : int="gelu" , __snake_case : Dict=0.1 , __snake_case : Dict=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Optional[int]=16 , __snake_case : List[Any]=2 , __snake_case : int=0.02 , __snake_case : int=3 , __snake_case : Tuple=4 , __snake_case : Tuple=None , ) -> Optional[int]:
UpperCAmelCase : str = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : str = seq_length
UpperCAmelCase : Dict = is_training
UpperCAmelCase : Tuple = use_input_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : Any = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Any = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : Union[str, Any] = hidden_act
UpperCAmelCase : List[str] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : Any = type_vocab_size
UpperCAmelCase : str = type_sequence_label_size
UpperCAmelCase : Any = initializer_range
UpperCAmelCase : Any = num_labels
UpperCAmelCase : Optional[Any] = num_choices
UpperCAmelCase : Any = scope
def A ( self : List[Any] ) -> int:
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : List[str] = None
UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[Any] ) -> List[str]:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__snake_case , )
def A ( self : str , __snake_case : List[Any] , __snake_case : int , __snake_case : Tuple , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> Any:
UpperCAmelCase : List[Any] = FalconModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : List[str] = model(__snake_case , attention_mask=__snake_case )
UpperCAmelCase : int = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Union[str, Any] , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Any , __snake_case : int , __snake_case : Dict , __snake_case : Any , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Optional[int] , ) -> List[str]:
UpperCAmelCase : str = True
UpperCAmelCase : Union[str, Any] = FalconModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , )
UpperCAmelCase : int = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , )
UpperCAmelCase : Any = model(__snake_case , attention_mask=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Optional[Any] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : int , __snake_case : str , __snake_case : str , __snake_case : Optional[int] , ) -> Optional[int]:
UpperCAmelCase : List[Any] = FalconForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : int = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Union[str, Any] , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : str , __snake_case : str , ) -> int:
UpperCAmelCase : List[Any] = True
UpperCAmelCase : Union[str, Any] = True
UpperCAmelCase : Tuple = FalconForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
# first forward pass
UpperCAmelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , use_cache=__snake_case , )
UpperCAmelCase : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : str = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : Dict = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , output_hidden_states=__snake_case , )['''hidden_states'''][0]
UpperCAmelCase : Tuple = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , past_key_values=__snake_case , output_hidden_states=__snake_case , )['''hidden_states'''][0]
# select random slice
UpperCAmelCase : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (FalconForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def A ( self : str ) -> Optional[Any]:
UpperCAmelCase : Dict = FalconModelTester(self )
UpperCAmelCase : Any = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def A ( self : int ) -> Any:
self.config_tester.run_common_tests()
def A ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def A ( self : List[str] ) -> List[str]:
UpperCAmelCase , *UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
UpperCAmelCase : Optional[Any] = alibi
self.model_tester.create_and_check_model(__snake_case , *__snake_case )
def A ( self : int ) -> Dict:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Union[str, Any] = input_dict['''input_ids''']
UpperCAmelCase : Any = input_ids.ne(1 ).to(__snake_case )
UpperCAmelCase : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Tuple = FalconForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : Optional[Any] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = 3
UpperCAmelCase : Tuple = '''single_label_classification'''
UpperCAmelCase : Union[str, Any] = input_dict['''input_ids''']
UpperCAmelCase : Dict = input_ids.ne(1 ).to(__snake_case )
UpperCAmelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Tuple = FalconForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Optional[Any] ) -> Dict:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[Any] = input_dict['''input_ids''']
UpperCAmelCase : Tuple = FalconForCausalLM(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : Dict = model(__snake_case , use_cache=__snake_case )
UpperCAmelCase : Tuple = input_ids.shape[0]
UpperCAmelCase : Any = model._convert_to_rw_cache(result.past_key_values )
UpperCAmelCase : Any = model._convert_cache_to_standard_format(__snake_case , __snake_case )
for layer in range(len(__snake_case ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def A ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = 3
UpperCAmelCase : List[Any] = '''multi_label_classification'''
UpperCAmelCase : Tuple = input_dict['''input_ids''']
UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__snake_case )
UpperCAmelCase : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : str = FalconForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : Dict = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : List[str] ) -> Tuple:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__snake_case , '''use_cache''' ):
return
UpperCAmelCase : List[str] = model_class(__snake_case ).to(__snake_case )
if "use_cache" not in inputs:
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : Optional[int] = model(**__snake_case )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
UpperCAmelCase : List[Any] = (
getattr(__snake_case , '''decoder_layers''' , __snake_case )
or getattr(__snake_case , '''num_decoder_layers''' , __snake_case )
or config.num_hidden_layers
)
UpperCAmelCase : Any = getattr(__snake_case , '''num_kv_heads''' , config.num_attention_heads )
UpperCAmelCase : Optional[Any] = getattr(__snake_case , '''d_model''' , config.hidden_size )
UpperCAmelCase : Union[str, Any] = embed_dim // num_attention_heads
UpperCAmelCase : List[str] = outputs['''past_key_values''']
self.assertEqual(len(__snake_case ) , __snake_case )
UpperCAmelCase , UpperCAmelCase : List[Any] = inputs['''input_ids'''].shape
for i in range(__snake_case ):
if config.new_decoder_architecture:
UpperCAmelCase : Tuple = config.num_attention_heads
elif config.multi_query:
UpperCAmelCase : List[Any] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
@slow
def A ( self : Any ) -> Tuple:
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
UpperCAmelCase : List[str] = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(__snake_case )
UpperCAmelCase : int = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__snake_case )
UpperCAmelCase : List[Any] = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
UpperCAmelCase : List[str] = model.generate(**__snake_case , do_sample=__snake_case , max_new_tokens=19 )
UpperCAmelCase : str = tokenizer.batch_decode(__snake_case )[0]
self.assertEqual(__snake_case , __snake_case )
@slow
def A ( self : Tuple ) -> List[Any]:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(__snake_case )
UpperCAmelCase : List[Any] = FalconForCausalLM.from_pretrained(__snake_case )
model.eval()
model.to(__snake_case )
UpperCAmelCase : List[str] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__snake_case )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__snake_case , do_sample=__snake_case , max_new_tokens=4 )
model.generate(**__snake_case , do_sample=__snake_case , max_new_tokens=4 )
model.generate(**__snake_case , num_beams=2 , max_new_tokens=4 )
@slow
def A ( self : str ) -> Optional[int]:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 23
|
import math
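# Project Euler problem 234, "Semidivisible numbers": for n >= 4, lps(n) is the
# largest prime <= sqrt(n) and ups(n) the smallest prime > sqrt(n); n is
# semidivisible when exactly one of lps(n) and ups(n) divides n. solution()
# sums all semidivisible numbers not exceeding the given limit.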
def prime_sieve(n: int) -> list:
    # odd-only sieve of Eratosthenes; 2 is appended separately below
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 240
| 0
|
'''simple docstring'''
from manim import *
class a__( lowerCamelCase__ ):
def lowercase_ ( self : str ):
a : Dict = Rectangle(height=0.5 , width=0.5 )
a : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
a : Optional[Any] = [mem.copy() for i in range(6 )]
a : Dict = [mem.copy() for i in range(6 )]
a : List[Any] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
a : int = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
a : Union[str, Any] = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
a : Any = Text('CPU' , font_size=24 )
a : int = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a : int = [mem.copy() for i in range(4 )]
a : str = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
a : List[Any] = Text('GPU' , font_size=24 )
a : Optional[int] = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a : List[str] = [mem.copy() for i in range(6 )]
a : List[str] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
a : int = Text('Model' , font_size=24 )
a : Dict = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a : Optional[Any] = []
for i, rect in enumerate(__snake_case ):
rect.set_stroke(__snake_case )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a : Optional[int] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__snake_case , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__snake_case , buff=0.0 )
self.add(__snake_case )
cpu_targs.append(__snake_case )
a : List[Any] = [mem.copy() for i in range(6 )]
a : Optional[int] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
a : Tuple = Text('Loaded Checkpoint' , font_size=24 )
a : str = Group(__snake_case , __snake_case ).arrange(__snake_case , aligned_edge=__snake_case , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
a : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a : Union[str, Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case , __snake_case )
a : Tuple = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
a : Optional[int] = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) , Write(__snake_case ) )
self.play(Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
a : List[str] = []
a : Dict = []
for i, rect in enumerate(__snake_case ):
a : Optional[int] = fill.copy().set_fill(__snake_case , opacity=0.7 )
target.move_to(__snake_case )
first_animations.append(GrowFromCenter(__snake_case , run_time=1 ) )
a : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
self.play(*__snake_case )
self.play(*__snake_case )
self.wait()
| 367
|
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)
    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state
    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)
    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))
    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        # embed the query and the concatenated support examples
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
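# Sketch of the expected inputs, as implied by forward() above: W_query and
# W_supports are tokenizer batches; W_supports additionally carries "sizes"
# (the number of support examples per entity type) plus "start_token_id" and
# "end_token_id", the boundary markers whose embeddings are scored against the
# query to produce per-token start/end probabilities.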
| 96
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # image of two cats, commonly used to verify vision model conversions
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
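# Illustrative invocation sketch (not part of the original script). The script
# file name and both argument values below are assumptions for the example; the
# run needs network access for the timm weights and the COCO test image:
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224
#
# or, equivalently, calling the function defined above directly:
#
#   convert_vit_checkpoint("vit_base_patch16_224", "./vit-base-patch16-224")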
| 13
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to help XLNet and Transformer-XL with short prompts
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)
    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def _A (self , lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase=None , **lowerCAmelCase ):
__lowercase= self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework )
__lowercase= prompt_text
if handle_long_generation == "hole":
__lowercase= inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowercase= generate_kwargs['max_new_tokens']
else:
__lowercase= generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowercase= self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
__lowercase= inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowercase= inputs['attention_mask'][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
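# Illustrative usage sketch (not part of the original file). The checkpoint
# name below is an assumption; any causal LM checkpoint works with this pipeline:
#
#   from transformers import pipeline
#
#   generator = pipeline("text-generation", model="gpt2")
#   result = generator("Hello, I'm a language model,", max_new_tokens=20)
#   print(result[0]["generated_text"])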
| 295
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """
    Check if a number is a perfect square.
    """
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """
    Add three fractions and reduce the result to lowest terms.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """
    Collect the unique sums of three fractions x^n + y^n + z^n for
    n in {1, 2, -1, -2}, with numerators and denominators bounded by `order`,
    and return the sum of the numerator and denominator of the total.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
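# Illustrative sanity check (not part of the original file): smaller search
# orders finish quickly, which is handy when experimenting with the solver;
# the default order of 35 is what the call above uses.
#
#   print(solution(order=5))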
| 15
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v) -> bool:
    """
    Parse a truthy/falsy string into a bool for argparse.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """
    Build a type function for argparse that maps a string back to the matching choice.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
) -> dataclasses.Field:
    """
    Helper around `dataclasses.field` that stores aliases and help text in the field metadata.
    """
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """
    Subclass of `argparse.ArgumentParser` that uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        """
        Parse command-line args into instances of the specified dataclass types.
        """
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """
        Alternative helper that does not use `argparse` at all, populating the dataclass types from a dict.
        """
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """
        Alternative helper that loads a json file and populates the dataclass types.
        """
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """
        Alternative helper that loads a yaml file and populates the dataclass types.
        """
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
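# Illustrative usage sketch (not part of the original file): a minimal
# dataclass parsed from the command line. The dataclass and its field names
# are invented for this example.
#
#   from dataclasses import dataclass, field
#
#   @dataclass
#   class TrainingArgs:
#       learning_rate: float = field(default=5e-5, metadata={"help": "Peak learning rate."})
#       do_eval: bool = False
#
#   parser = HfArgumentParser(TrainingArgs)
#   (training_args,) = parser.parse_args_into_dataclasses()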
| 15
| 1
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    # two test images from a fixtures dataset hosted on the Hub
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
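# Illustrative sketch (not part of the test file): ImageGPT image processors
# quantize pixels against color clusters, so the toy 2-cluster dict defined in
# ImageGPTImageProcessingTester yields token ids 0 and 1 only. The random
# image below is invented for the example.
#
#   processor = ImageGPTImageProcessor(**ImageGPTImageProcessingTester(None).prepare_image_processor_dict())
#   toy = Image.fromarray(np.random.randint(0, 255, (18, 18, 3), dtype=np.uint8))
#   print(processor(toy, return_tensors="np").input_ids.shape)  # one token per pixel after resizing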
| 230
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    # This section tests a variety of data types.
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
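# Illustrative non-interactive sketch (not part of the original file): the
# same API exercised by the tests above, without the input() prompts of main().
#
#   ll = LinkedList()
#   for value in (1, 2, 3):
#       ll.insert_tail(value)
#   ll.reverse()
#   print(ll)  # 3->2->1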
| 230
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
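# Illustrative note (not part of the original file): with the `_LazyModule`
# pattern above, the heavy submodules are imported only when first accessed:
#
#   from transformers.models.speecht5 import SpeechT5Processor  # triggers the lazy import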
| 241
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """
    Create a schedule with a constant learning rate.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """
    Create a schedule with a constant learning rate preceded by a linear warmup period.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """
    Create a piecewise constant schedule from rules like "1:10,0.1:20,0.01:30,0.005"
    (multiplier 1 for the first 10 steps, 0.1 up to step 20, 0.01 up to step 30, then 0.005).
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """
    Create a schedule where the learning rate decreases linearly to 0 after a
    linear warmup period.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """
    Create a schedule where the learning rate follows a cosine decay to 0 after
    a linear warmup period.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """
    Create a schedule with a cosine-decaying learning rate and several hard
    restarts, after a linear warmup period.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """
    Create a schedule where the learning rate decays from the initial lr to
    `lr_end` following a polynomial of the given power, after a linear warmup
    period.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """
    Unified API to get any scheduler from its name.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
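# Illustrative usage sketch (not part of the original file): the toy model,
# learning rate, and step counts below are invented for the example.
#
#   import torch
#
#   model = torch.nn.Linear(4, 4)
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)
#   for _ in range(100):
#       optimizer.step()
#       lr_scheduler.step()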
| 251
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """
    Load TensorFlow checkpoints in a PyTorch model.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True) -> None:
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Each block is a depthwise 3x3 convolution followed by a pointwise 1x1 convolution.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
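

# Minimal usage sketch for the backbone above (hedged: "google/mobilenet_v1_1.0_224"
# is assumed here purely for illustration; any MobileNetV1 checkpoint works):
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetVaModel.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = processor(images=image, return_tensors="pt")  # `image` is a PIL image
#     outputs = model(**inputs)
#     outputs.last_hidden_state.shape  # (batch, 1024, 7, 7) for a 224x224 input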
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
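

# Classification-head usage, as a sketch (same assumed checkpoint as above):
#
#     model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     logits = model(**inputs).logits        # (batch, num_labels)
#     predicted = logits.argmax(-1).item()
#     model.config.id2label[predicted]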
| 191
|
def bin_to_octal(bin_string: str) -> str:
    """Convert a string of binary digits to its octal string representation."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros until the length is a multiple of 3.
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
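

# A couple of quick checks of the conversion above (verifiable by hand:
# 0b111 == 0o7, and 0b1111 == 15 == 0o17):
#
#     bin_to_octal("111")   # "7"
#     bin_to_octal("1111")  # "17"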
if __name__ == "__main__":
from doctest import testmod
testmod()
| 191
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 17
|
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states
class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
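

# Illustrative shape trace through the encoder above (assumes a CLIP vision
# config with hidden_size=768 and the default proj_size=768; values hypothetical):
#   pixel_values (batch, 3, 224, 224) -> CLIP pooler_output (batch, 768)
#   -> latent_states[:, None] (batch, 1, 768) -> mapper and layer norm keep
#   the shape -> proj_out gives (batch, 1, proj_size)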
| 96
| 0
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """A PyTorch Dataset wrapping SQuAD features, to be used with question-answering models."""

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
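

# Usage sketch (hypothetical paths; any HF tokenizer with SQuAD preprocessing
# support works here):
#
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="path/to/squad")
#     dataset = SquadDataset(args, tokenizer, mode=Split.train)
#     batch = dataset[0]  # dict of tensors ready for a QA model's forward pass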
| 370
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __A ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to feed the image processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 323
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_bertweet': ['BertweetTokenizer']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 104
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, 'all_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == 'cuda'
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, 'default_config.yml')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append('--fp16')

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_accuracy'], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'glue_no_trainer')))

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result['perplexity'], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'clm_no_trainer')))

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result['perplexity'], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'mlm_no_trainer')))

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_accuracy'], 0.75)
        self.assertLess(result['train_loss'], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'ner_no_trainer')))

    @unittest.skip(reason='Fix me @muellerzr')
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result['eval_f1'], 28)
        self.assertGreaterEqual(result['eval_exact'], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'qa_no_trainer')))

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_accuracy'], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'swag_no_trainer')))

    @slow
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_rouge1'], 10)
        self.assertGreaterEqual(result['eval_rouge2'], 2)
        self.assertGreaterEqual(result['eval_rougeL'], 7)
        self.assertGreaterEqual(result['eval_rougeLsum'], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'summarization_no_trainer')))

    @slow
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_bleu'], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'translation_no_trainer')))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_overall_accuracy'], 0.10)

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append('--fp16')

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result['eval_accuracy'], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'step_1')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'image_classification_no_trainer')))
| 104
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
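

# Quick sketch of how the config is typically used (values illustrative):
#
#     config = BertConfig()  # bert-base-uncased style defaults (12 layers, hidden size 768)
#     tiny = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)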
| 237
|
'''simple docstring'''
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Compute the fixed monthly payment (EMI) for a loan."""
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0')
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('Years to repay must be an integer > 0')

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
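

# Worked example (standard EMI formula; numbers verifiable by hand): a principal
# of 100_000 at 10% per annum over 10 years gives a monthly rate of 0.1 / 12 and
# 120 payments, so
#
#     equated_monthly_installments(100_000, 0.10, 10)  # ~1321.51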
if __name__ == "__main__":
import doctest
doctest.testmod()
| 237
| 1
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name: str) -> ASTConfig:
    """Build an ASTConfig matching the given checkpoint name."""
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('''Model not supported''')

    repo_id = '''huggingface/label-files'''
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = '''speech-commands-v2-id2label.json'''
    else:
        config.num_labels = 527
        filename = '''audioset-id2label.json'''

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name: str) -> str:
    """Map a key from the original AST checkpoint to the HF naming scheme."""
    if "module.v" in name:
        name = name.replace('''module.v''', '''audio_spectrogram_transformer''')
    if "cls_token" in name:
        name = name.replace('''cls_token''', '''embeddings.cls_token''')
    if "dist_token" in name:
        name = name.replace('''dist_token''', '''embeddings.distillation_token''')
    if "pos_embed" in name:
        name = name.replace('''pos_embed''', '''embeddings.position_embeddings''')
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    # transformer blocks
    if "blocks" in name:
        name = name.replace('''blocks''', '''encoder.layer''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('''audio_spectrogram_transformer.norm''', '''audio_spectrogram_transformer.layernorm''')
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('''module.mlp_head.0''', '''classifier.layernorm''')
    if "module.mlp_head.1" in name:
        name = name.replace('''module.mlp_head.1''', '''classifier.dense''')
    return name
def convert_state_dict(orig_state_dict: dict, config: ASTConfig) -> dict:
    """Rename checkpoint keys and split the fused qkv projections."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
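

# Note on the qkv split above: the original checkpoint stores the query/key/value
# projections as one fused matrix of shape (3 * hidden_size, hidden_size), so the
# slices val[:dim], val[dim : dim * 2] and val[-dim:] recover q, k and v in that
# order. With the default hidden_size of 768 the fused weight is (2304, 768) and
# each slice is (768, 768).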
def remove_keys(state_dict: dict) -> None:
    """Drop classifier-head keys that have no HF counterpart."""
    ignore_keys = [
        '''module.v.head.weight''',
        '''module.v.head.bias''',
        '''module.v.head_dist.weight''',
        '''module.v.head_dist.bias''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to our Audio Spectrogram Transformer structure.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if '''speech-commands''' not in model_name else -6.845978
    std = 4.5689974 if '''speech-commands''' not in model_name else 5.5654526
    max_length = 1024 if '''speech-commands''' not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset('''speech_commands''', '''v0.02''', split='''validation''')
        waveform = dataset[0]['''audio''']['''array''']
    else:
        filepath = hf_hub_download(
            repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''', )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors='''pt''')

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
snake_case__ : Optional[Any] = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
snake_case__ : str = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
snake_case__ : Union[str, Any] = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
snake_case__ : int = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
snake_case__ : List[str] = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
snake_case__ : Tuple = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
snake_case__ : Optional[Any] = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
snake_case__ : List[Any] = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError('''Unknown model name''' )
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError('''Logits don\'t match''')
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving feature extractor to {pytorch_dump_folder_path}""")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(f"""MIT/{model_name}""" )
feature_extractor.push_to_hub(f"""MIT/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 230
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 230
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
def _SCREAMING_SNAKE_CASE ( self: str) -> int:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt")
__lowerCAmelCase : Optional[int] = text_classifier("This is great !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "LABEL_0", "score": 0.504}])
__lowerCAmelCase : Optional[Any] = text_classifier("This is great !" , top_k=2)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}])
__lowerCAmelCase : List[Any] = text_classifier(["This is great !", "This is bad"] , top_k=2)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
__lowerCAmelCase : Any = text_classifier("This is great !" , top_k=1)
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "LABEL_0", "score": 0.504}])
# Legacy behavior
__lowerCAmelCase : Any = text_classifier("This is great !" , return_all_scores=_SCREAMING_SNAKE_CASE)
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "LABEL_0", "score": 0.504}])
__lowerCAmelCase : List[str] = text_classifier("This is great !" , return_all_scores=_SCREAMING_SNAKE_CASE)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]])
__lowerCAmelCase : List[str] = text_classifier(["This is great !", "Something else"] , return_all_scores=_SCREAMING_SNAKE_CASE)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
__lowerCAmelCase : int = text_classifier(["This is great !", "Something else"] , return_all_scores=_SCREAMING_SNAKE_CASE)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [
{"label": "LABEL_0", "score": 0.504},
{"label": "LABEL_0", "score": 0.504},
] , )
@require_torch
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Optional[int]:
"""simple docstring"""
import torch
__lowerCAmelCase : Union[str, Any] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu") , )
__lowerCAmelCase : Union[str, Any] = text_classifier("This is great !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "LABEL_0", "score": 0.504}])
@require_tf
def _SCREAMING_SNAKE_CASE ( self: Any) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf")
__lowerCAmelCase : List[str] = text_classifier("This is great !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "LABEL_0", "score": 0.504}])
@slow
@require_torch
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[Any] = pipeline("text-classification")
__lowerCAmelCase : List[str] = text_classifier("This is great !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "POSITIVE", "score": 1.0}])
__lowerCAmelCase : str = text_classifier("This is bad !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "NEGATIVE", "score": 1.0}])
__lowerCAmelCase : Optional[int] = text_classifier("Birds are a type of animal")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "POSITIVE", "score": 0.988}])
@slow
@require_tf
def _SCREAMING_SNAKE_CASE ( self: int) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = pipeline("text-classification" , framework="tf")
__lowerCAmelCase : str = text_classifier("This is great !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "POSITIVE", "score": 1.0}])
__lowerCAmelCase : Dict = text_classifier("This is bad !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "NEGATIVE", "score": 1.0}])
__lowerCAmelCase : str = text_classifier("Birds are a type of animal")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "POSITIVE", "score": 0.988}])
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Any) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = TextClassificationPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE)
return text_classifier, ["HuggingFace is in", "This is another test"]
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Any) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
__lowerCAmelCase : Union[str, Any] = "HuggingFace is in"
__lowerCAmelCase : Optional[int] = text_classifier(_SCREAMING_SNAKE_CASE)
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
__lowerCAmelCase : Tuple = ["HuggingFace is in ", "Paris is in France"]
__lowerCAmelCase : Optional[Any] = text_classifier(_SCREAMING_SNAKE_CASE)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)}, {"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)}] , )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
__lowerCAmelCase : int = text_classifier(_SCREAMING_SNAKE_CASE , top_k=_SCREAMING_SNAKE_CASE)
        N = len(model.config.id2label.values())
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [[{"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)}] * N, [{"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)}] * N] , )
__lowerCAmelCase : Optional[int] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
__lowerCAmelCase : str = text_classifier(_SCREAMING_SNAKE_CASE)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , {"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)} , )
        self.assertTrue(outputs["label"] in model.config.id2label.values())
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
__lowerCAmelCase : Optional[int] = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(_SCREAMING_SNAKE_CASE):
text_classifier(_SCREAMING_SNAKE_CASE)
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
__lowerCAmelCase : List[Any] = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)}] , )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
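# Note (added): the assertions above cover both multi-score APIs of the pipeline:
# the legacy `return_all_scores=True/False` flag and its replacement `top_k`
# (`top_k=None` returns a score for every label, `top_k=1` only the best one).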
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__snake_case : List[str] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class A__ ( ChunkPipeline ):
'''simple docstring'''
def __init__( self: List[str] , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
requires_backends(self , "vision")
self.check_model_type(_SCREAMING_SNAKE_CASE)
def __call__( self: str , _SCREAMING_SNAKE_CASE: Union[str, "Image.Image", List[Dict[str, Any]]] , _SCREAMING_SNAKE_CASE: Union[str, List[str]] = None , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> int:
"""simple docstring"""
if "text_queries" in kwargs:
__lowerCAmelCase : List[str] = kwargs.pop("text_queries")
if isinstance(_SCREAMING_SNAKE_CASE , (str, Image.Image)):
__lowerCAmelCase : Any = {"image": image, "candidate_labels": candidate_labels}
else:
__lowerCAmelCase : Dict = image
__lowerCAmelCase : Optional[int] = super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
return results
def _SCREAMING_SNAKE_CASE ( self: Any , **_SCREAMING_SNAKE_CASE: Tuple) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = {}
if "threshold" in kwargs:
__lowerCAmelCase : Optional[int] = kwargs["threshold"]
if "top_k" in kwargs:
__lowerCAmelCase : int = kwargs["top_k"]
return {}, {}, postprocess_params
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Dict) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = load_image(inputs["image"])
__lowerCAmelCase : Union[str, Any] = inputs["candidate_labels"]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[int] = candidate_labels.split(",")
        __lowerCAmelCase : Union[str, Any] = torch.tensor([[image.height, image.width]] , dtype=torch.int32)
for i, candidate_label in enumerate(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework)
__lowerCAmelCase : Dict = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=self.framework)
yield {
"is_last": i == len(_SCREAMING_SNAKE_CASE) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = model_inputs.pop("target_size")
__lowerCAmelCase : Any = model_inputs.pop("candidate_label")
__lowerCAmelCase : List[str] = model_inputs.pop("is_last")
__lowerCAmelCase : Dict = self.model(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=None) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : int = []
for model_output in model_outputs:
__lowerCAmelCase : Dict = model_output["candidate_label"]
__lowerCAmelCase : int = BaseModelOutput(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = self.image_processor.post_process_object_detection(
outputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE , target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
__lowerCAmelCase : Any = outputs["scores"][index].item()
__lowerCAmelCase : int = self._get_bounding_box(outputs["boxes"][index][0])
__lowerCAmelCase : List[str] = {"score": score, "label": label, "box": box}
results.append(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE: x["score"] , reverse=_SCREAMING_SNAKE_CASE)
if top_k:
__lowerCAmelCase : str = results[:top_k]
return results
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: "torch.Tensor") -> Dict[str, int]:
"""simple docstring"""
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = box.int().tolist()
__lowerCAmelCase : Any = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
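# Illustrative usage (added; the checkpoint is one example of a compatible
# zero-shot detector, not something referenced in this file):
#
#     from transformers import pipeline
#
#     detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]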
"""simple docstring"""
from manim import *
class _SCREAMING_SNAKE_CASE( Scene ):
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = Rectangle(height=0.5 ,width=0.5 )
__SCREAMING_SNAKE_CASE :List[str] = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0 )
__SCREAMING_SNAKE_CASE :List[str] = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE :List[str] = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE :Optional[int] = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ ,buff=0 )
__SCREAMING_SNAKE_CASE :Optional[Any] = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ ,buff=0 )
__SCREAMING_SNAKE_CASE :Any = VGroup(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ ,buff=0 )
__SCREAMING_SNAKE_CASE :Tuple = Text('''CPU''' ,font_size=24 )
__SCREAMING_SNAKE_CASE :Optional[Any] = Group(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ ,buff=0.5 ,aligned_edge=SCREAMING_SNAKE_CASE__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = [mem.copy() for i in range(1 )]
__SCREAMING_SNAKE_CASE :str = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ ,buff=0 )
__SCREAMING_SNAKE_CASE :Union[str, Any] = Text('''GPU''' ,font_size=24 )
__SCREAMING_SNAKE_CASE :int = Group(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ ,buff=0.5 ,aligned_edge=SCREAMING_SNAKE_CASE__ )
gpu.align_to(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE :int = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ ,buff=0 )
__SCREAMING_SNAKE_CASE :List[Any] = Text('''Model''' ,font_size=24 )
__SCREAMING_SNAKE_CASE :int = Group(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ ,buff=0.5 ,aligned_edge=SCREAMING_SNAKE_CASE__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(SCREAMING_SNAKE_CASE__ ,run_time=1 ) ,Create(SCREAMING_SNAKE_CASE__ ,run_time=1 ) ,Create(SCREAMING_SNAKE_CASE__ ,run_time=1 ) ,)
__SCREAMING_SNAKE_CASE :List[str] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,)
__SCREAMING_SNAKE_CASE :List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__SCREAMING_SNAKE_CASE :Optional[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE__ ,run_time=2.5 ) ,Write(SCREAMING_SNAKE_CASE__ ) ,Write(SCREAMING_SNAKE_CASE__ ) )
self.add(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = []
__SCREAMING_SNAKE_CASE :int = []
__SCREAMING_SNAKE_CASE :List[Any] = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :Any = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE__ ,opacity=0.7 )
cpu_target.move_to(SCREAMING_SNAKE_CASE__ )
cpu_target.generate_target()
__SCREAMING_SNAKE_CASE :Union[str, Any] = 0.4_6 / 4
__SCREAMING_SNAKE_CASE :Tuple = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.0_2 ,direction=SCREAMING_SNAKE_CASE__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=SCREAMING_SNAKE_CASE__ ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=SCREAMING_SNAKE_CASE__ ,buff=0.0 )
cpu_targs.append(SCREAMING_SNAKE_CASE__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(SCREAMING_SNAKE_CASE__ ) )
second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE__ ,run_time=1.5 ) )
self.play(*SCREAMING_SNAKE_CASE__ )
self.play(*SCREAMING_SNAKE_CASE__ )
self.wait()
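# Rendering note (added): a Manim scene like the one above is typically rendered
# from the command line with
#     manim -pql <this_file>.py <SceneClassName>
# where -p previews the result and -ql selects low quality for fast iteration.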
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
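# Note (added): the _LazyModule indirection above defers the heavy torch/vision
# imports until an attribute such as DPTModel or DPTImageProcessor is first
# accessed, while the TYPE_CHECKING branch keeps static type checkers aware of
# the real symbols.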
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester ( unittest.TestCase ):
def __init__( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Any=7 , snake_case__ : Dict=3 , snake_case__ : Dict=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : List[str]=4_0_0 , snake_case__ : Dict=True , snake_case__ : List[Any]=None , snake_case__ : str=True , snake_case__ : Dict=None , snake_case__ : Dict=True , snake_case__ : Union[str, Any]=[0.5, 0.5, 0.5] , snake_case__ : Dict=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
lowercase :Tuple = size if size is not None else {'''shortest_edge''': 1_8}
lowercase :Dict = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
lowercase :Any = parent
lowercase :List[str] = batch_size
lowercase :List[Any] = num_channels
lowercase :Dict = image_size
lowercase :Any = min_resolution
lowercase :List[str] = max_resolution
lowercase :int = do_resize
lowercase :List[Any] = size
lowercase :str = do_center_crop
lowercase :int = crop_size
lowercase :str = do_normalize
lowercase :Optional[Any] = image_mean
lowercase :Dict = image_std
def __snake_case ( self : Tuple ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __magic_name__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LevitImageProcessor if is_vision_available() else None
def __snake_case ( self : List[Any] ):
'''simple docstring'''
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
def __snake_case ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self : str ):
'''simple docstring'''
lowercase :int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , '''image_mean''' ) )
self.assertTrue(hasattr(snake_case__ , '''image_std''' ) )
self.assertTrue(hasattr(snake_case__ , '''do_normalize''' ) )
self.assertTrue(hasattr(snake_case__ , '''do_resize''' ) )
self.assertTrue(hasattr(snake_case__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(snake_case__ , '''size''' ) )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
lowercase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
lowercase :Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase :Any = image_processing(snake_case__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
lowercase :Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase :List[str] = image_processing(snake_case__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
lowercase :Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase :Tuple = image_processing(snake_case__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
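# Note (added): the three call tests above exercise the same resize -> center-crop
# -> normalize path for PIL, NumPy and PyTorch inputs; in each case the returned
# pixel_values tensor has shape (batch_size, num_channels, crop_height, crop_width).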
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = '''▁'''
UpperCAmelCase = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
UpperCAmelCase = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
UpperCAmelCase = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
UpperCAmelCase = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
UpperCAmelCase = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class __magic_name__ ( PreTrainedTokenizer ):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
def __init__( self : Dict , snake_case__ : List[Any] , snake_case__ : List[Any]=None , snake_case__ : int=False , snake_case__ : Optional[int]="utf8" , snake_case__ : List[str]="[UNK]" , snake_case__ : Tuple="[SEP]" , snake_case__ : List[Any]="[PAD]" , snake_case__ : Dict="[CLS]" , snake_case__ : Dict="[MASK]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : str , ):
'''simple docstring'''
lowercase :Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , vocab_file=snake_case__ , encoding=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
lowercase :Dict = do_lower_case
lowercase :str = sentencepiece_model_ckpt
lowercase :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowercase :Tuple = self.load_vocab(filepath=snake_case__ )
else:
lowercase :str = {self.sp_model.id_to_piece(snake_case__ ): id for id in range(self.sp_model.get_piece_size() )}
lowercase :Any = {v: k for k, v in self.vocab.items()}
def __snake_case ( self : List[str] , snake_case__ : str ):
'''simple docstring'''
if text is None:
return None
lowercase :List[Any] = self.tokenize(snake_case__ )
lowercase , lowercase :List[str] = '''''', []
for i, ch in enumerate(snake_case__ ):
if ch in self.SP_CHAR_MAPPING:
lowercase :Optional[int] = self.SP_CHAR_MAPPING.get(snake_case__ )
else:
lowercase :Optional[int] = unicodedata.normalize('''NFKC''' , snake_case__ )
if self.is_whitespace(snake_case__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(snake_case__ ) )
lowercase , lowercase , lowercase :int = normalized_text, [], 0
if self.do_lower_case:
lowercase :Any = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowercase :Tuple = token[1:]
lowercase :List[str] = text[offset:].index(snake_case__ ) + offset
lowercase :Tuple = start + len(snake_case__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowercase :int = end
return token_mapping
@property
def __snake_case ( self : List[Any] ):
'''simple docstring'''
return len(self.vocab )
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Optional[int] ):
'''simple docstring'''
lowercase :Any = self.__dict__.copy()
lowercase :Optional[int] = None
return state
def __setstate__( self : Tuple , snake_case__ : Dict ):
'''simple docstring'''
lowercase :Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase :Dict = {}
lowercase :List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def __snake_case ( self : int , snake_case__ : List[Any] ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(snake_case__ , snake_case__ ) for c in text) )
def __snake_case ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : int=False , snake_case__ : Dict=6_4 , snake_case__ : Any=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
lowercase :Any = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
lowercase :Any = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
lowercase :Optional[Any] = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
lowercase :Any = self.sp_model.EncodeAsPieces(snake_case__ )
else:
lowercase :List[Any] = self.sp_model.SampleEncodeAsPieces(snake_case__ , snake_case__ , snake_case__ )
lowercase :str = []
for pi, piece in enumerate(snake_case__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(snake_case__ ) and pi != 0:
new_pieces.append(snake_case__ )
continue
else:
continue
lowercase :int = 0
for i, chunk in enumerate(snake_case__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(snake_case__ ) or self.is_punct(snake_case__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(snake_case__ )
lowercase :Optional[int] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase :str = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase :Dict = i
if len(snake_case__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def __snake_case ( self : Dict , snake_case__ : str ):
'''simple docstring'''
lowercase :int = ''''''.join(snake_case__ ).replace(snake_case__ , ''' ''' ).strip()
return out_string
def __snake_case ( self : int , snake_case__ : str ):
'''simple docstring'''
lowercase :Tuple = self.convert_ids_to_tokens(snake_case__ )
lowercase :Any = ''''''.join(snake_case__ ).replace(snake_case__ , ''' ''' ).strip()
return out_string
def __snake_case ( self : int , snake_case__ : Union[str, Any] ):
'''simple docstring'''
return self.vocab.get(snake_case__ , self.vocab.get(self.unk_token ) )
def __snake_case ( self : List[Any] , snake_case__ : List[str] ):
'''simple docstring'''
return self.reverse_vocab.get(snake_case__ , self.unk_token )
def __snake_case ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase :int = [self.cls_token_id]
lowercase :str = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def __snake_case ( self : Any , snake_case__ : Dict , snake_case__ : str=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def __snake_case ( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : Any=None , snake_case__ : Optional[int]=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1]
def __snake_case ( self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(snake_case__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(snake_case__ ) + 1) + [1] * (len(snake_case__ ) + 3)
def __snake_case ( self : List[Any] , snake_case__ : Any ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def __snake_case ( self : List[str] , snake_case__ : Any ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def __snake_case ( self : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def __snake_case ( self : Optional[int] , snake_case__ : List[str] ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(snake_case__ ) == 1:
lowercase :str = unicodedata.category(snake_case__ )
if cat == "Zs":
return True
return False
def __snake_case ( self : str , snake_case__ : Any ):
'''simple docstring'''
lowercase :Dict = {}
with io.open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(snake_case__ ):
lowercase :Dict = line.rstrip('''\n''' )
lowercase :str = int(snake_case__ )
return token_to_idx
def __snake_case ( self : Dict , snake_case__ : str , snake_case__ : Optional[str] = None ):
'''simple docstring'''
lowercase :Optional[int] = 0
if os.path.isdir(snake_case__ ):
lowercase :str = os.path.join(
snake_case__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase :Any = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowercase :Optional[int] = token_index
writer.write(token + '''\n''' )
index += 1
lowercase :int = os.path.join(snake_case__ , '''sentencepiece.bpe.model''' )
with open(snake_case__ , '''wb''' ) as fi:
lowercase :Tuple = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (vocab_file,)
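# Illustrative usage (added; the tokenizer mimics ErnieMTokenizer per the comment
# in __init__, and the checkpoint name comes from the pretrained map above):
#
#     tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
#     tokenizer("Hello world!")["input_ids"]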
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase( metaclass=DummyObject ):
'''simple docstring'''
lowercase__ = ["note_seq"]
def __init__( self: Dict, *a_: Union[str, Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(self, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[int], *a_: Any, **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Tuple, *a_: Optional[Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
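# Note (added): this placeholder lets `import diffusers` succeed when note_seq is
# not installed; requires_backends only raises an informative error (with install
# instructions) once the class is actually instantiated or its classmethods are used.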
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**lowerCamelCase_ )
return config
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = 1
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scheduler.timesteps
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCamelCase_ ):
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Dict = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0]
with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCamelCase_ , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)
def a (self : Dict , **a__ : Tuple ):
"""simple docstring"""
__snake_case = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
config.update(**a__ )
return config
def a (self : str , a__ : Any=0 , **a__ : Tuple ):
"""simple docstring"""
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop('''num_inference_steps''' , a__ )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
__snake_case = scheduler_class.from_pretrained(a__ )
new_scheduler.set_timesteps(a__ )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case , __snake_case = sample, sample
for t in range(a__ , time_step + scheduler.config.solver_order + 1 ):
__snake_case = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
__snake_case = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
def a (self : List[Any] , a__ : Dict=0 , **a__ : List[str] ):
"""simple docstring"""
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop('''num_inference_steps''' , a__ )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals (must be after setting timesteps)
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
__snake_case = scheduler_class.from_pretrained(a__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a__ )
# copy over dummy past residual (must be after setting timesteps)
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
__snake_case = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a (self : int , a__ : Tuple=None , **a__ : List[str] ):
"""simple docstring"""
if scheduler is None:
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
return sample
def a (self : str ):
"""simple docstring"""
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = 50
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1E-3
def a (self : int ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=a__ )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = self.full_loop(scheduler=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
__snake_case = DEISMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverMultistepScheduler.from_config(scheduler.config )
__snake_case = UniPCMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__snake_case = self.full_loop(scheduler=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def a (self : List[str] ):
"""simple docstring"""
self.check_over_configs(thresholding=a__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=a__ , prediction_type=a__ , sample_max_value=a__ , algorithm_type='''dpmsolver++''' , solver_order=a__ , solver_type=a__ , )
def a (self : Union[str, Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def a (self : Union[str, Any] ):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
__snake_case = self.full_loop(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
assert not torch.isnan(a__ ).any(), "Samples have nan numbers"
def a (self : List[str] ):
"""simple docstring"""
self.check_over_configs(lower_order_final=a__ )
self.check_over_configs(lower_order_final=a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def a (self : Tuple ):
"""simple docstring"""
self.check_over_configs(variance_type=a__ )
self.check_over_configs(variance_type='''learned_range''' )
def a (self : int ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=a__ , time_step=0 )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.full_loop()
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def a (self : int ):
"""simple docstring"""
__snake_case = self.full_loop(use_karras_sigmas=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1E-3
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.full_loop(prediction_type='''v_prediction''' )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1E-3
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1E-3
def a (self : int ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(thresholding=a__ , dynamic_thresholding_ratio=0 )
__snake_case = scheduler_class(**a__ )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter.half()
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
        assert sample.dtype == torch.float16
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list) -> None:
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print('''Sorted order is:''', ''' '''.join(str(x) for x in a))
if __name__ == "__main__":
    main()
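# Complexity note (added): pigeonhole sort runs in O(n + k) time with O(k) extra
# space, where k = max(a) - min(a) + 1, so it only pays off when the value range
# is small relative to the number of elements.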
'''simple docstring'''
def base16_encode( data : bytes ) -> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data : str ) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
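# Round-trip sanity check (added; uses the function names defined above):
#     base16_encode(b"Hello World!")             # -> '48656C6C6F20576F726C6421'
#     base16_decode("48656C6C6F20576F726C6421")  # -> b'Hello World!'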
'''simple docstring'''
import functools
def min_edit_distance(worda: str, wordb: str) -> int:
    len_worda = len(worda)
    len_wordb = len(wordb)
    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb), 1 + min_distance(indexa, indexb + 1), diff + min_distance(indexa + 1, indexb + 1), )
    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
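# Worked example (added; uses the top-level function name defined above): turning
# "kitten" into "sitting" takes three single-character edits (substitute k->s,
# substitute e->i, append g):
#     min_edit_distance("kitten", "sitting")  # -> 3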
'''simple docstring'''
def simplify ( SCREAMING_SNAKE_CASE__ ) -> list[list]:
'''simple docstring'''
snake_case : Any = current_set.copy()
for row_index, row in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case : Optional[int] = row[0]
for column_index, column in enumerate(SCREAMING_SNAKE_CASE__ ):
if magnitude == 0:
snake_case : Any = column
continue
snake_case : str = column / magnitude
# Subtract to cancel term
snake_case : Any = current_set[0]
snake_case : List[Any] = [first_row]
snake_case : str = current_set[1::]
for row in current_set:
snake_case : Tuple = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(SCREAMING_SNAKE_CASE__ )
continue
for column_index in range(len(SCREAMING_SNAKE_CASE__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(SCREAMING_SNAKE_CASE__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
snake_case : str = final_set[0]
snake_case : Optional[Any] = []
snake_case : Union[str, Any] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
snake_case : List[Any] = simplify(SCREAMING_SNAKE_CASE__ )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , SCREAMING_SNAKE_CASE__ )
snake_case : str = resultant
return final_set
def solve_simultaneous ( SCREAMING_SNAKE_CASE__ ) -> list:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE__ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
snake_case : List[str] = len(SCREAMING_SNAKE_CASE__ ) + 1
if any(len(SCREAMING_SNAKE_CASE__ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(SCREAMING_SNAKE_CASE__ ) == 1:
return [equations[0][-1] / equations[0][0]]
snake_case : int = equations.copy()
if any(0 in row for row in data_set ):
snake_case : Optional[int] = data_set.copy()
snake_case : Dict = []
for row_index, row in enumerate(SCREAMING_SNAKE_CASE__ ):
if 0 not in row:
snake_case : Union[str, Any] = data_set.pop(SCREAMING_SNAKE_CASE__ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0 , SCREAMING_SNAKE_CASE__ )
snake_case : Optional[int] = data_set.copy()
snake_case : List[Any] = simplify(SCREAMING_SNAKE_CASE__ )
snake_case : Optional[Any] = simplified[::-1]
snake_case : list = []
for row in simplified:
snake_case : Tuple = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
snake_case : str = row.copy()[: len(SCREAMING_SNAKE_CASE__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(SCREAMING_SNAKE_CASE__ ) == 0:
solutions.append(0 )
continue
snake_case : int = temp_row[1::]
snake_case : Dict = temp_row[::-1]
for column_index, column in enumerate(SCREAMING_SNAKE_CASE__ ):
current_solution -= column * solutions[column_index]
solutions.append(SCREAMING_SNAKE_CASE__ )
snake_case : Optional[Any] = []
for item in solutions:
final.append(float(round(SCREAMING_SNAKE_CASE__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
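# Worked example (added): the 2x2 system x + 2y = 7, 3x + 4y = 17 is passed as one
# list per equation with the constant term last, and solves to x = 3.0, y = 2.0:
#     solve_simultaneous([[1, 2, 7], [3, 4, 17]])  # -> [3.0, 2.0]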
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
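        # (added note) the expected ids follow the toy vocab defined in setUp:
        # l=0, o=1, w=2, er=15, \u0120=10, n=9, e=3, [UNK]=19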
    def test_token_type_ids(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''', '''World''')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''')
        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_2 = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''', '''multi-sequence build''', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        """simple docstring"""
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''')
            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding['''input_ids''']]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 83
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
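# Shape sketch (added for illustration): timm stores qkv as one matrix of shape
# (3 * hidden_size, hidden_size), so for hidden_size = 768 the (2304, 768) matrix
# above is sliced into three (768, 768) query/key/value projections.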
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    '''simple docstring'''
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny'):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('small'):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small'):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('base'):
            pass
        elif vit_name[4:].startswith('large'):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('huge'):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
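# Example invocation (the script file name is illustrative; the flags are the ones defined above):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit-base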
| 339
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class Data2VecTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''data2vec-text'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
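# Note (added): """choice""" appears as a dynamic axis only for the multiple-choice
# task because its inputs are 3-D (batch, choice, sequence); other tasks use 2-D inputs.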
| 58
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("""microsoft/resnet-50""")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("""microsoft/resnet-101""")
    else:
        raise ValueError("""Model name should include either resnet50 or resnet101""")
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = """panoptic""" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = """huggingface/label-files"""
    filename = """coco-detection-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = """"""
    if is_panoptic:
        prefix = """detr."""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
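# Note (added): the hard-coded 256 above is DETR's hidden size (d_model); each
# in_proj matrix has shape (3 * 256, 256) and is split into query/key/value thirds.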
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)
    # load original model from torch hub
    model_name_to_original_name = {
        """detr-resnet-50""": """detr_resnet50""",
        """detr-resnet-101""": """detr_resnet101""",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("""facebookresearch/detr""", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = """detr.""" + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = """detr.model.""" if is_panoptic else """model."""
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("""detr""")
                and not key.startswith("""class_labels_classifier""")
                and not key.startswith("""bbox_predictor""")
            ):
                val = state_dict.pop(key)
                state_dict["""detr.model""" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["""detr.""" + key] = val
            elif key.startswith("""bbox_attention""") or key.startswith("""mask_head"""):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("""class_labels_classifier""") and not key.startswith("""bbox_predictor"""):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = """coco_panoptic""" if is_panoptic else """coco_detection"""
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="""pt""")
    pixel_values = encoding["""pixel_values"""]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["""pred_logits"""], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["""pred_boxes"""], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["""pred_masks"""], atol=1e-4)
    print("""Looks ok!""")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("""Uploading PyTorch model and image processor to the hub...""")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
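# Example invocation (the script file name is illustrative; the flags are the ones defined above):
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 --pytorch_dump_folder_path ./detr-resnet-50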
| 352
|
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens once the squared magnitude a*a + b*b exceeds 4,
        # i.e. once the complex number's absolute value exceeds 2
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
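# Note (added): the return value is the escape step normalized to [0, 1]; exactly 1.0
# means the point never diverged within max_step iterations and is treated as inside
# the Mandelbrot set by the coloring functions below.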
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
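# Quick check (illustrative): colorsys.hsv_to_rgb(0.5, 1, 1) == (0, 1, 1), so
# get_color_coded_rgb(0.5) returns the cyan pixel (0, 255, 255).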
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("""RGB""", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 21
| 0
|
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    '''simple docstring'''
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
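# Quick check (illustrative): softmax(np.array([[0.0, 0.0]])) -> array([[0.5, 0.5]])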
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 255
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig(PretrainedConfig):
    model_type = """lilt"""

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 172
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 351
|
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """simple docstring"""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
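# Worked example (added for illustration): with denominations [1, 2, 5, 10] and
# value "27", the greedy loop picks [10, 10, 5, 2].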
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
    if (
        input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
        == "y"
    ):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
        for i in range(0, n):
            denominations.append(int(input(F'''Denomination {i}: ''').strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('''Enter the change you want to make: ''').strip()
    if int(value) == 0 or int(value) < 0:
        print('''The total value cannot be zero or negative.''')
    else:
        print(F'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 173
| 0
|
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """simple docstring"""
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
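# Example invocation (illustrative paths; the script name in Transformers is
# convert_transfo_xl_original_tf_checkpoint_to_pytorch.py):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl --transfo_xl_dataset_file ./corpus.pkl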
| 238
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
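        # e.g. with the defaults above: (2 // 2) * (10 // 2) ** 2 = 25 tokens per video,
        # of which int(0.9 * 25) = 22 are masked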
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict['bool_masked_pos'] = bool_masked_pos.to(torch_device)
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
                out_len = len(outputs)
                # Check attention is always last and order is fine
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass


# We will verify our results on a video of eating spaghetti
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )
        with torch.no_grad():
            outputs = model(**inputs)
        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
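
# A minimal sketch of how a `bool_masked_pos` tensor like the one downloaded above can be built by
# hand (random tube masking). The 0.9 masking ratio and the 16-frame / 224px / tubelet-2 / patch-16
# geometry are the VideoMAE paper defaults, assumed here for illustration only:
def make_random_bool_masked_pos(num_frames=16, tubelet_size=2, image_size=224, patch_size=16, mask_ratio=0.9):
    seq_length = (num_frames // tubelet_size) * (image_size // patch_size) ** 2
    num_masks = int(mask_ratio * seq_length)
    # 1 = masked patch, 0 = visible patch; shuffle to choose random positions
    mask = torch.cat([torch.zeros(seq_length - num_masks), torch.ones(num_masks)])
    return mask[torch.randperm(seq_length)].bool().unsqueeze(0)  # shape (1, seq_length)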
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish, split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"\n            --model_name_or_path {model_name}\n            --train_file {data_dir}/train.json\n            --validation_file {data_dir}/val.json\n            --test_file {data_dir}/test.json\n            --output_dir {output_dir}\n            --overwrite_output_dir\n            --max_train_samples 8\n            --max_source_length {max_len}\n            --max_target_length {max_len}\n            --do_train\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 4\n            --learning_rate {learning_rate}\n            --warmup_steps 8\n            --logging_steps 0\n            --logging_strategy no\n            --save_steps {str(eval_steps)}\n            --group_by_length\n            --label_smoothing_factor 0.1\n            --target_lang ro_RO\n            --source_lang en_XX\n        ".split()
        args_eval = f"\n            --do_eval\n            --per_device_eval_batch_size 4\n            --max_eval_samples 8\n            --val_max_target_length {max_len}\n            --evaluation_strategy steps\n            --eval_steps {str(eval_steps)}\n        ".split()
        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"\n                -m torch.distributed.run\n                --nproc_per_node={n_gpus_to_use}\n                --master_port={master_port}\n                {self.examples_dir_str}/pytorch/translation/run_translation.py\n            ".split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
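
# For reference, with distributed=True on 2 GPUs the command assembled by `run_trainer` above is
# equivalent to the following shell invocation. The port and the repo-relative path are
# illustrative; at runtime they come from get_torch_dist_unique_port() and self.examples_dir_str:
#
#   python -m torch.distributed.run --nproc_per_node=2 --master_port=29500 \
#       examples/pytorch/translation/run_translation.py \
#       --model_name_or_path sshleifer/tiny-mbart --do_train --do_eval --predict_with_generate ...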
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
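
# A small, self-contained illustration of the qkv split performed above: a fused projection of
# size (3 * qkv_dim, hidden) is cut into three equal chunks along dim 0. The sizes are made up:
def _split_qkv_example():
    mixed_qkv = torch.zeros(6, 4)  # pretend fused weight with qkv_dim = 2, hidden = 4
    qkv_dim = mixed_qkv.size(0) // 3
    query = mixed_qkv[:qkv_dim]
    key = mixed_qkv[qkv_dim : qkv_dim * 2]
    value = mixed_qkv[qkv_dim * 2 :]
    assert query.shape == key.shape == value.shape == (2, 4)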
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, _ = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
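
# Example invocation (a hypothetical local checkpoint path; the script filename follows the usual
# transformers convention for conversion scripts and is an assumption here, not taken from above):
#
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./clap_htsat_tiny.pt \
#       --pytorch_dump_folder_path ./clap-hf-checkpoint \
#       --enable_fusion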
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
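
# The `full_loop` helper above is the standard diffusers denoising loop in miniature. A standalone
# sketch with a zero "model" output and a made-up sample shape (a real pipeline calls a trained UNet):
def _deis_sampling_sketch(num_inference_steps=10):
    scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
    sample = torch.randn(1, 3, 8, 8)
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    return sample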
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """
        The coefficients are ordered by degree, from x^0 upwards.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
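
# Usage example: p(x) = 3x^2 + 2x + 1, its derivative 6x + 2, and evaluation at x = 2.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # coefficients are ordered from x^0 upwards
    print(p)               # 3x^2 + 2x + 1
    print(p.derivative())  # 6x + 2
    print(p.evaluate(2))   # 3*4 + 2*2 + 1 = 17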
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
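
# The iterator pattern verified above is the one typically used when serving models: generation
# runs on a background thread while the caller consumes decoded text as it arrives. A condensed
# sketch reusing the objects from the tests (the helper name and defaults are illustrative):
def stream_generation(model, tokenizer, input_ids, max_new_tokens=10):
    streamer = TextIteratorStreamer(tokenizer)
    generation_kwargs = {"input_ids": input_ids, "max_new_tokens": max_new_tokens, "streamer": streamer}
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    for new_text in streamer:
        yield new_text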
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
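
# A condensed version of the batching pattern verified above: left padding plus an attention mask
# makes batched generation line up with per-sentence generation. The helper name and default
# max_new_tokens are illustrative, not part of the test suite:
def batched_generate(model, tokenizer, sentences, max_new_tokens=12):
    tokenizer.padding_side = "left"
    inputs = tokenizer(sentences, return_tensors="tf", padding=True)
    outputs = model.generate(
        input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=max_new_tokens
    )
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)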
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
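    # Note on the check above: comparing a fixed 3x3 corner slice of the last
    # channel against hard-coded values is the usual diffusers fast-test
    # pattern; it pins a deterministic region of the output without storing
    # the whole 64x64 image.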
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Convert the original Bort checkpoint (GluonNLP/MXNet) to the Transformers BERT structure."""
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter                                                | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta`                                      | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma`                                     | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight`                                      | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight`                                          | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias`     | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight`   | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias`   | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias`   | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias`                   | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight`                 | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta`                  | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma`                 | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias`                   | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight`                 | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta`              | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma`             | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias`                        | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight`                      | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase_ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
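# Example invocation (paths are hypothetical; the two flags are the ones the
# parser above defines):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch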
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
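# A minimal sketch of the lazy-import pattern implemented by `_LazyModule`
# above (PEP 562 module-level `__getattr__`); kept as a comment so it does not
# execute here, and the module/symbol names are illustrative only:
#
#     import importlib
#
#     _import_structure = {"math": ["sqrt"]}
#
#     def __getattr__(name):
#         for module_name, symbols in _import_structure.items():
#             if name in symbols:
#                 return getattr(importlib.import_module(module_name), name)
#         raise AttributeError(name)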
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
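# These are plain pytest-style module functions; assuming the repository layout
# implied by the paths above, a typical run would be:
#   python -m pytest digital_image_processing/test_digital_image_processing.py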
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
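# Worked example for find_negative_index: in the descending row [4, 3, 2, -1]
# the first negative value sits at index 3, so the call returns 3; for a row
# with no negatives such as [3, 2, 1] it returns len(row) == 3.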
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
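# Complexity note: on an n x m grid the binary-search approach runs in
# O(n log m), while both brute-force variants are O(n * m) in the worst case,
# which matches the benchmark timings recorded in the comments below.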
def benchmark() -> None:
    """Benchmark the three counting functions against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
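# Worked example of rename_keys (key chosen for illustration):
#   "module.encoder.patch_embed1.proj.weight"
#     -> "glpn.encoder.patch_embeddings.0.proj.weight"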
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    # load GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
a : List[Any] = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
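# Hypothetical invocation (file name and checkpoint path are illustrative;
# the flags are the ones defined above):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti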
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_pyanvml_available():
    import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
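# For reference: random_input_ids(8, 128, 30522) returns an int32 tf.Tensor of
# shape (8, 128) with token ids drawn uniformly from [0, vocab_size); 30522 is
# just an illustrative vocabulary size.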
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(torch_device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self , _snake_case , _snake_case="cpu" , _snake_case=torch.floataa , _snake_case=0 ):
"""simple docstring"""
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = np.random.RandomState(_snake_case ).standard_normal((1, 8, 128, 16) )
_lowerCAmelCase = torch.from_numpy(_snake_case ).to(device=_snake_case , dtype=_snake_case )
_lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_inputs(_snake_case )
_lowerCAmelCase = 25
_lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 81920
_lowerCAmelCase = audio[77230:77240]
_lowerCAmelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_lowerCAmelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 81920
_lowerCAmelCase = audio[27780:27790]
_lowerCAmelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_albert"] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_albert"] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_albert"] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert import AlbertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert_fast import AlbertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
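# Note (illustrative): with this lazy-import structure, importing the package is
# cheap; e.g. `from transformers.models.albert import AlbertModel` only loads the
# torch-backed modeling file on first attribute access via _LazyModule.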
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
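# Illustrative: instantiating the shim behaves like GLPNImageProcessor but warns first.
#
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       GLPNFeatureExtractor()
#   assert caught[0].category is FutureWarning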
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
    "b0": {"hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.0, "image_size": 224, "dropout_rate": 0.2, "dw_padding": []},
    "b1": {"hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.1, "image_size": 240, "dropout_rate": 0.2, "dw_padding": [16]},
    "b2": {"hidden_dim": 1408, "width_coef": 1.1, "depth_coef": 1.2, "image_size": 260, "dropout_rate": 0.3, "dw_padding": [5, 8, 16]},
    "b3": {"hidden_dim": 1536, "width_coef": 1.2, "depth_coef": 1.4, "image_size": 300, "dropout_rate": 0.3, "dw_padding": [5, 18]},
    "b4": {"hidden_dim": 1792, "width_coef": 1.4, "depth_coef": 1.8, "image_size": 380, "dropout_rate": 0.4, "dw_padding": [6]},
    "b5": {"hidden_dim": 2048, "width_coef": 1.6, "depth_coef": 2.2, "image_size": 456, "dropout_rate": 0.4, "dw_padding": [13, 27]},
    "b6": {"hidden_dim": 2304, "width_coef": 1.8, "depth_coef": 2.6, "image_size": 528, "dropout_rate": 0.5, "dw_padding": [31]},
    "b7": {"hidden_dim": 2560, "width_coef": 2.0, "depth_coef": 3.1, "image_size": 600, "dropout_rate": 0.5, "dw_padding": [18]},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original TF EfficientNet weights into our structure."""
    # Load original model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
__snake_case = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
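# Example invocation (script filename assumed to be convert_efficientnet_to_pytorch.py;
# flags are the ones defined above):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model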
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
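# Sketch of the control flow (informational): during inference, the highway heads
# in modeling_highway_bert may raise HighwayException from an intermediate layer;
# the except-branch above then treats that layer's logits as the final output and
# records the exit layer, so easy examples skip the remaining transformer layers.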
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
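# Illustrative usage (values are the class defaults, not tied to any checkpoint):
if __name__ == "__main__":
    config = Speech2Text2Config(vocab_size=10000, d_model=256, decoder_layers=6)
    # attribute_map resolves these aliases to decoder_attention_heads / d_model
    print(config.num_attention_heads, config.hidden_size)  # -> 4 256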
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
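# Illustrative test (hypothetical, not part of the original conftest): a test can
# request the fixture chain by name and receives the on-disk script path.
#
# def test_script_is_materialized(dataset_loading_script_dir):
#     assert dataset_loading_script_dir.endswith("__dummy_dataset1__.py")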
"""simple docstring"""
A : int = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A : List[Any] = [{"type": "code", "content": INSTALL_CONTENT}]
A : List[str] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items by descending value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
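# Worked example (illustrative): value=[60, 100, 120], weight=[10, 20, 30],
# capacity=50. Ratios are [6.0, 5.0, 4.0], so items 0 and 1 are taken whole
# (value 160, remaining capacity 20) and 20/30 of item 2 adds 80:
# fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
# -> (240.0, [1, 1, 0.6666666666666666])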
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab_tokens = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab_tokens, range(len(vocab_tokens))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
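# Usage sketch (illustrative): RagTokenizer bundles the two tokenizers exercised
# above; calling it tokenizes with the question-encoder tokenizer by default.
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   batch = tokenizer(["who got the first nobel prize in physics"])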
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
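# Sketch of the @get_duration decorator imported from utils above (an assumption
# about its behavior, not the actual implementation): it times a single call and
# returns the elapsed wall-clock seconds.
#
# def get_duration(func):
#     def wrapper(*args, **kwargs):
#         start = timeit.default_timer()
#         func(*args, **kwargs)
#         return timeit.default_timer() - start
#     wrapper.__name__ = func.__name__
#     return wrapper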
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass
        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
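# Example invocation (the command is deprecated, see the warning in run();
# the testing_file path below is illustrative):
#
#   transformers-cli add-new-model
#   transformers-cli add-new-model --testing --testing_file config.json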
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy/paste/tweak the original GLPN weights into our GLPN structure."""
    # define GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
A__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
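# Example invocation (script filename assumed to be convert_glpn_to_pytorch.py;
# checkpoint path is illustrative):
#
#   python convert_glpn_to_pytorch.py --checkpoint_path glpn_kitti.pth \
#       --pytorch_dump_folder_path glpn-kitti --model_name glpn-kitti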
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1, depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish",
        hidden_dim: int = 2560, pooling_type: str = "mean",
        initializer_range: float = 0.02, batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2, **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
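# Illustrative usage (defaults correspond to efficientnet-b7; override by keyword):
if __name__ == "__main__":
    config = EfficientNetConfig(image_size=224, width_coefficient=1.0, depth_coefficient=1.0)
    print(config.model_type, config.num_hidden_layers)  # -> efficientnet 64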
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    """DFS traversal: returns subtree size, recording even-sized subtrees in cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
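# Worked example: for the 10-node tree above, removing the edges (1, 3) and
# (1, 6) leaves every component with an even number of nodes; dfs() records
# three even-sized subtrees (rooted at 3, 6, and the whole tree at 1), so the
# script prints len(cuts) - 1 = 2.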
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_pyanvml_available():
    import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> ["tf.Tensor"]:
UpperCamelCase : Optional[Any] = random.Random()
UpperCamelCase : str = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(_lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int):
        '''simple docstring'''
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`.")
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        '''simple docstring'''
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 5 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/3/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10)
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"""Doesn't fit on GPU. {e}""")
    def _measure_memory(self, func):
        '''simple docstring'''
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used.")
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line.")
                    trace = start_memory_tracing("transformers")
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`")
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU.")
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU.")
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow.")
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"""Doesn't fit on GPU. {e}""")
                return "N/A", None
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        '''simple docstring'''
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        '''simple docstring'''
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        '''simple docstring'''
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])  # path compression
        return self.parents[disj_set]
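# A minimal usage sketch for the union-by-rank class above (the call sequence
# and the expected values are illustrative and were checked by hand):
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1, 1])  # four singleton sets
    ds.merge(0, 1)  # {0, 1}, {2}, {3}
    ds.merge(2, 3)  # {0, 1}, {2, 3}
    ds.merge(1, 2)  # everything joined
    assert ds.get_parent(0) == ds.get_parent(3)
    assert ds.max_set == 4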
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
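# Illustrative usage (hypothetical; downloading the checkpoint needs network access):
#     tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#     tokenizer("Hello world")["input_ids"]  # XLNet appends <sep> <cls> at the end,
#                                            # which is why padding_side is "left" above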
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def solution(pence: int = 200) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73_682
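# Worked example of the DP above, computed by hand with coins [1, 2] and pence = 4:
#   start:        number_of_ways = [1, 0, 0, 0, 0]
#   after coin 1: number_of_ways = [1, 1, 1, 1, 1]
#   after coin 2: number_of_ways = [1, 1, 2, 2, 3]  -> three ways: 1+1+1+1, 1+1+2, 2+2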
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main():
    """simple docstring"""
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
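# To run this locally (assumptions: a GitHub personal-access token with repo
# scope is available, and the script is saved under a placeholder name):
#     GITHUB_TOKEN=<token> python stale.py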
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    '''simple docstring'''

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])
    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq])
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions, atom_mask=atom_mask, aatype=aatype, residue_index=np.arange(len(aatype)), b_factors=None, )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"""REMARK {remark}""")
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]
    if parents is None or len(parents) == 0:
        parents = ["N/A"]
    pdb_headers.append(f"""PARENT {" ".join(parents)}""")
    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines = []
    lines = pdb_str.split("\n")
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"""REMARK {remark}""")
    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)
            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p) -> str:
        return f"""PARENT {" ".join(p)}"""

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))
    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))
    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types
    pdb_lines = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")
    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue
            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f""" {atom_name}"""
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""
            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
                f"""{res_name_3:>3} {chain_tag:>1}"""
                f"""{residue_index[i]:>4}{insertion_code:>1}   """
                f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
                f"""{occupancy:>6.2f}{b_factor:>6.2f}          """
                f"""{element:>2}{charge:>2}"""
            )
            pdb_lines.append(atom_line)
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"""{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"""
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))
    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(features, result, b_factors=None, chain_index=None, remark=None, parents=None, parents_chain_index=None, ) -> Protein:
    return Protein(
        aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=chain_index , remark=remark , parents=parents , parents_chain_index=parents_chain_index , )
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value=None) -> None:
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"""{self.value}""": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    def __init__(self, root=None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node, new_children) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node) -> int:
        arr: list = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")
    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")
    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore
    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        '''simple docstring'''
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
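# Illustrative usage (hypothetical checkpoint; requires network access and the
# sentencepiece package):
#     tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#     tok("hello world")["input_ids"]  # character-level pieces, ends with </s>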
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
        '''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        '''simple docstring'''
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model

    def test_inference(self):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_loop(self):
        '''simple docstring'''
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        '''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types(self):
        '''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        '''simple docstring'''
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse}
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
def binary_insertion_sort(collection: list) -> list:
    """simple docstring"""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(binary_insertion_sort(unsorted))
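# Worked trace of binary_insertion_sort([5, 2, 4, 1]), computed by hand:
#   i=1, val=2: binary search over [5]        -> insert at 0 -> [2, 5, 4, 1]
#   i=2, val=4: binary search over [2, 5]     -> insert at 1 -> [2, 4, 5, 1]
#   i=3, val=1: binary search over [2, 4, 5]  -> insert at 0 -> [1, 2, 4, 5]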
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _snake_case ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__: List[Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = '''this is a test'''
lowercase__: int = '''this is a test'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: Optional[int] = '''<pad>'''
lowercase__: Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_UpperCAmelCase ) , 30001 )
def _snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _snake_case ( self ):
# fmt: off
lowercase__: int = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__: Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# fmt: off
lowercase__: Dict = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Any = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.'''
lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__: Dict = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.get_tokenizer()
lowercase__: List[Any] = self.get_rust_tokenizer()
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.get_rust_tokenizer()
lowercase__: str = tokenizer.encode(_UpperCAmelCase )
lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''This is a test'''
lowercase__: str = [13, 1, 4398, 25, 21, 1289]
lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: int = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# fmt: off
lowercase__: str = '''I was born in 92000, and this is falsé.'''
lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase )
lowercase__: Optional[int] = tokenizer.encode('''sequence builders''' )
lowercase__: Optional[Any] = tokenizer.encode('''multi-sequence build''' )
lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , )
@slow
def _snake_case ( self ):
# fmt: off
lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 2
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 2
| 1
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
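# Added usage sketch (not from the original script): the script file name and
# the local paths below are assumptions for illustration only.
#
#   python convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --not_finetuned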
| 109
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 291
| 0
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
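# Added sanity check (not part of the original benchmark): for the vectors used
# above, the distance is sqrt((4-1)**2 + (5-2)**2 + (6-3)**2) = sqrt(27) ~= 5.196,
# and the two implementations should agree to floating-point precision.
assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-9
assert abs(euclidean_distance_no_np([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-9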
| 299
|
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
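# Added worked check (not in the original demo): at redshift z = 0 the density
# term is radiation + matter + curvature + dark_energy, and since curvature is
# defined as 1 - (matter + radiation + dark_energy) it sums to exactly 1, so
# the function must return the Hubble constant itself.
assert abs(
    hubble_parameter(
        hubble_constant=68.3,
        radiation_density=1e-4,
        matter_density=0.3,
        dark_energy=0.7,
        redshift=0,
    )
    - 68.3
) < 1e-6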
| 299
| 1
|
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit

            if sudoku(grid) is not None:
                return grid

            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 260
|
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
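# Added examples (not in the original): an odd combined length returns the
# middle element, an even one the mean of the two middle elements.
assert median_of_two_arrays([1, 3], [2]) == 2
assert median_of_two_arrays([1, 2], [3, 4]) == 2.5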
| 228
| 0
|
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
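# Added example (not in the original): the first three terms of the series.
assert harmonic_series("3") == ["1", "1/2", "1/3"]
assert harmonic_series("") == []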
| 354
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
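# Added usage sketch (the checkpoint name is an assumption for illustration,
# and the call hits the Hub, so it is left commented out):
#
#   config_dict = get_image_processor_config("google/vit-base-patch16-224")
#   config_dict.get("image_processor_type")  # e.g. "ViTImageProcessor"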
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
| 62
| 0
|
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: Optional[str] = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
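# Added worked example of the warmup schedule (kept as comments; the constants
# are illustrative, not from the original module): with
# initial_learning_rate=1e-3, warmup_steps=100 and power=1.0, step 50 gives
# (50 / 100) ** 1.0 * 1e-3 = 5e-4; once step >= 100 the wrapped
# decay_schedule_fn takes over, evaluated at step - warmup_steps.
#
#   schedule = WarmUp(
#       initial_learning_rate=1e-3,
#       decay_schedule_fn=lambda step: 1e-3,  # flat schedule after warmup
#       warmup_steps=100,
#   )
#   float(schedule(50))   # ~0.0005
#   float(schedule(200))  # 0.001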
def create_optimizer(
    init_lr,
    num_train_steps,
    num_warmup_steps,
    min_lr_ratio=0.0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    adam_clipnorm=None,
    adam_global_clipnorm=None,
    weight_decay_rate=0.0,
    power=1.0,
    include_in_weight_decay=None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
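# Added usage sketch for the accumulator (kept as comments; names such as
# `batch_gradients` and `model` are illustrative, not from the original):
#
#   accumulator = GradientAccumulator()
#   for batch_gradients in per_batch_gradients:  # e.g. from tape.gradient(...)
#       accumulator(batch_gradients)             # accumulate on this replica
#   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   accumulator.reset()                          # start a new accumulation window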
| 97
|
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F'''{solution() = }''')
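# Added note (not in the original): the search relies on Euclid's formula --
# for coprime m > n of opposite parity, (m**2 - n**2, 2*m*n, m**2 + n**2) is a
# primitive Pythagorean triple with perimeter 2*m*(m + n). For m=2, n=1 this
# gives the (3, 4, 5) triple, whose perimeter matches 2*m*(m + n):
assert 2 * 2 * (2 + 1) == 3 + 4 + 5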
| 296
| 0
|
"""simple docstring"""
def add(first: int, second: int) -> int:
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 161
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
| 161
| 1
|
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 2
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 2
| 1
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
UpperCamelCase_ = np.array([[1, item, train_mtch[i]] for i, item in enumerate(A__ )] )
UpperCamelCase_ = np.array(A__ )
UpperCamelCase_ = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , A__ ) ) , x.transpose() ) , A__ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def sarimax_predictor(train_user, train_match, test_match):
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
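# Added note (explanatory comment, not in the original snippet): order=(p, d, q)
# = (1, 2, 1) fits an AR(1) plus MA(1) model on a twice-differenced series, and
# seasonal_order=(P, D, Q, s) = (1, 1, 0, 7) adds a weekly (s=7) seasonal AR
# term with one seasonal difference; method="nm" selects the Nelder-Mead
# optimizer for the likelihood fit.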
def support_vector_regressor(x_train, x_test, train_user):
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user):
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
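# Added worked example (comment only, assuming the 0.1 multiplier is
# intentional): for sorted data [1, 2, 3, 4, 5], q1 = 2.0 and q3 = 4.0, so
# iqr = 2.0 and low_lim = 2.0 - 0.2 = 1.8. Note that the conventional IQR
# outlier fence uses iqr * 1.5; the 0.1 here is a much tighter lower bound.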
def data_safety_checker(list_vote, actual_result):
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            # a forecast above the actual value counts against safety
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
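# Illustrative trace (added comment): for votes [0.45, 0.55, 0.48] against an
# actual value of 0.5, the first and third votes land within the 0.1 tolerance
# (safe = 2) while 0.55 exceeds the actual value (not_safe = 1), so the
# majority vote returns True.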
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
_UpperCAmelCase = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
_UpperCAmelCase = pd.DataFrame(
data_input, columns=['total_user', 'total_even', 'days']
)
_UpperCAmelCase = Normalizer().fit_transform(data_input_df.values)
# split data
_UpperCAmelCase = normalize_df[:, 2].tolist()
_UpperCAmelCase = normalize_df[:, 0].tolist()
_UpperCAmelCase = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
_UpperCAmelCase = normalize_df[:, [1, 2]].tolist()
_UpperCAmelCase = x[: len(x) - 1]
_UpperCAmelCase = x[len(x) - 1 :]
# for linear regression & sarimax
_UpperCAmelCase = total_date[: len(total_date) - 1]
_UpperCAmelCase = total_user[: len(total_user) - 1]
_UpperCAmelCase = total_match[: len(total_match) - 1]
_UpperCAmelCase = total_date[len(total_date) - 1 :]
_UpperCAmelCase = total_user[len(total_user) - 1 :]
_UpperCAmelCase = total_match[len(total_match) - 1 :]
# voting system with forecasting
_UpperCAmelCase = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
_UpperCAmelCase = "" if data_safety_checker(res_vote, tst_user) else "not "
print('Today\'s data is {not_str}safe.')
| 351
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 328
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."} )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."} )
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."} , )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        } , )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)} , )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    config_overrides: Optional[str] = field(
        default=None, metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    image_size: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        } , )
    patch_size: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        } , )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."} , )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task, one value per model patch
    where 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
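# Added worked example (comment only): with the defaults input_size=192,
# mask_patch_size=32, model_patch_size=4 and mask_ratio=0.6, rand_size = 6,
# scale = 8, token_count = 36 and mask_count = ceil(21.6) = 22; after the
# repeat() upsampling the flattened mask has 48 * 48 = 2304 entries, one per
# 4-pixel model patch.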
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
config.update(
{
'''image_size''': model_args.image_size,
'''patch_size''': model_args.patch_size,
'''encoder_stride''': model_args.encoder_stride,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ] )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio, )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 299
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 299
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 79
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 79
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 69
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62
| 0
|
def solution(limit: int = 28123) -> int:
    # Sieve of proper-divisor sums: sum_divs[n] holds the sum of the proper
    # divisors of n once the double loop below has run.
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
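# Added explanation (comment only): this is Project Euler problem 23. A number
# n is abundant when sum_divs[n] > n (the smallest is 12, since
# 1 + 2 + 3 + 4 + 6 = 16 > 12), and 28123 is the published bound above which
# every integer can be written as the sum of two abundant numbers, so the sieve
# only needs to run that far.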
if __name__ == "__main__":
print(solution())
| 351
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 90
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
a__ : Optional[int] = logging.get_logger(__name__)
a__ : List[str] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs at least opset 14 to support the tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self, preprocessor: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
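    # Added note (comment only): the `[:, ::2] = 1` assignment above marks every
    # second token as global purely so the ONNX export exercises the
    # global-attention code path; in real Longformer usage global attention is
    # typically reserved for a few special tokens such as the leading <s> token.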
| 161
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : List[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1_408,
        intermediate_size=6_144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1_408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs, ):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 161
| 1
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(src_traced)} operations while'''
                f''' destination module has {len(dest_traced)}.''')

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'''Transfered from={src_m} to={dest_m}''')
def convert_weight_and_push(name, config, save_directory, push_to_hub=True):
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = F'''resnet{"-".join(name.split("resnet" ) )}'''
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )

        print(F'''Pushed {checkpoint_name}''' )
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic" ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck" ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic" ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck" ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck" ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck" ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 359
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for label in f.readlines():
            vg_classes.append(label.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for label in f.readlines():
            vg_attrs.append(label.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class _A :
"""simple docstring"""
UpperCAmelCase : int = {}
def __init__( self : Any , __UpperCAmelCase : dict , __UpperCAmelCase : str = "root" , __UpperCAmelCase : Optional[int]=0):
a : List[str] = name
a : Tuple = level
a : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
a : List[Any] = copy.deepcopy(__UpperCAmelCase)
a : int = copy.deepcopy(__UpperCAmelCase)
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : Union[str, Any] = Config(__UpperCAmelCase , name=__UpperCAmelCase , level=level + 1)
a : Dict = v
setattr(self , __UpperCAmelCase , __UpperCAmelCase)
a : Tuple = d
def __repr__( self : List[str]):
return str(list((self._pointer.keys())))
def __setattr__( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Tuple):
a : Optional[Any] = val
a : Tuple = val
a : Dict = key.split(".")
a : Union[str, Any] = len(__UpperCAmelCase) - 1
a : Optional[int] = self._pointer
if len(__UpperCAmelCase) > 1:
for i, l in enumerate(__UpperCAmelCase):
if hasattr(self , __UpperCAmelCase) and isinstance(getattr(self , __UpperCAmelCase) , __UpperCAmelCase):
setattr(getattr(self , __UpperCAmelCase) , ".".join(levels[i:]) , __UpperCAmelCase)
if l == last_level:
a : int = val
else:
a : str = pointer[l]
def __snake_case ( self : str):
return self._pointer
def __snake_case ( self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any]):
with open(f'''{file_name}''' , "w") as stream:
dump(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : int):
with open(f'''{file_name}''' , "w") as stream:
json.dump(__UpperCAmelCase , __UpperCAmelCase)
@staticmethod
def __snake_case ( __UpperCAmelCase : Dict):
with open(__UpperCAmelCase) as stream:
a : List[str] = load(__UpperCAmelCase , Loader=__UpperCAmelCase)
return data
def __str__( self : Tuple):
a : str = " "
if self._name != "root":
a : List[str] = f'''{t * (self._level-1)}{self._name}:\n'''
else:
a : Optional[Any] = ""
a : List[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items()):
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
r += f'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += f'''{t * (self._level)}{k}: {v} ({type(__UpperCAmelCase).__name__})\n'''
a : Tuple = level
return r[:-1]
@classmethod
def __snake_case ( cls : str , __UpperCAmelCase : str , **__UpperCAmelCase : List[Any]):
a , a : Tuple = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase)
return cls(__UpperCAmelCase)
@classmethod
def __snake_case ( cls : Union[str, Any] , __UpperCAmelCase : str , **__UpperCAmelCase : List[str]):
a : int = kwargs.pop("cache_dir" , __UpperCAmelCase)
a : List[Any] = kwargs.pop("force_download" , __UpperCAmelCase)
a : Optional[int] = kwargs.pop("resume_download" , __UpperCAmelCase)
a : Tuple = kwargs.pop("proxies" , __UpperCAmelCase)
a : int = kwargs.pop("local_files_only" , __UpperCAmelCase)
if os.path.isdir(__UpperCAmelCase):
a : Union[str, Any] = os.path.join(__UpperCAmelCase , __UpperCAmelCase)
elif os.path.isfile(__UpperCAmelCase) or is_remote_url(__UpperCAmelCase):
a : List[Any] = pretrained_model_name_or_path
else:
a : int = hf_bucket_url(__UpperCAmelCase , filename=__UpperCAmelCase , use_cdn=__UpperCAmelCase)
try:
# Load from URL or cache if already cached
a : Optional[Any] = cached_path(
__UpperCAmelCase , cache_dir=__UpperCAmelCase , force_download=__UpperCAmelCase , proxies=__UpperCAmelCase , resume_download=__UpperCAmelCase , local_files_only=__UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
a : Union[str, Any] = Config.load_yaml(__UpperCAmelCase)
except EnvironmentError:
a : str = "Can't load config for"
raise EnvironmentError(__UpperCAmelCase)
if resolved_config_file == config_file:
print("loading configuration file from path")
else:
print("loading configuration file cache")
return Config.load_yaml(__UpperCAmelCase), kwargs
def lowercase ( A_ )-> str:
'''simple docstring'''
a : Tuple = torch.load("dump.pt" , map_location=in_tensor.device )
a : Any = in_tensor.numpy()
a : Optional[int] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(A_ , A_ , rtol=0.0_1 , atol=0.1 ), (
F'''{sum([1 for x in np.isclose(A_ , A_ , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return F'''{endpoint}/{model_id}-{filename}'''
    else:
        return F'''{endpoint}/{model_id}/{filename}'''
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None, ):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading", )
    for chunk in response.iter_content(chunk_size=1_024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False) -> str:
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False.")
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s" % (url, temp_file.name))
            http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None) -> str:
    """Derive a deterministic cache filename from a URL and, optionally, its ETag."""
    url_bytes = url.encode("utf-8")
    url_hash = shaaaa(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = shaaaa(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
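# The mapping above is deterministic: the same URL (and ETag) always maps to the
# same cache filename, which is how get_from_cache() finds earlier downloads.
# `shaaaa` is assumed to be this module's alias for hashlib.sha256 (import not shown here).
# >>> url_to_filename("https://example.com/model.bin") == url_to_filename("https://example.com/model.bin")
# True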
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False) -> str:
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only, )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def lowercase ( A_ , A_="," )-> Union[str, Any]:
'''simple docstring'''
assert isinstance(A_ , A_ )
if os.path.isfile(A_ ):
with open(A_ ) as f:
a : str = eval(f.read() )
else:
a : List[Any] = requests.get(A_ )
try:
a : Any = requests.json()
except Exception:
a : Any = req.content.decode()
assert data is not None, "could not connect"
try:
a : Optional[Any] = eval(A_ )
except Exception:
a : Any = data.split("\n" )
req.close()
return data
def get_image_from_url(url) -> np.ndarray:
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_checkpoint(ckp_url):
    fn = ckp_url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(ckp_url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path() -> None:
    # PATH is assumed to be a module-level directory constant defined near the
    # imports (not visible in this excerpt).
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def lowercase ( A_ , A_="RGB" )-> Any:
'''simple docstring'''
assert isinstance(A_ , A_ )
if os.path.isfile(A_ ):
a : Dict = cva.imread(A_ )
else:
a : Union[str, Any] = get_image_from_url(A_ )
assert img is not None, F'''could not connect to: {im}'''
a : int = cva.cvtColor(A_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
a : List[str] = img[:, :, ::-1]
return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
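# Hedged usage sketch for chunk(): slices are yielded lazily and the final
# batch may be shorter than `batch`.
# >>> list(chunk([1, 2, 3, 4, 5], batch=2))
# [[1, 2], [3, 4], [5]]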
| 226
| 0
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
        'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
        'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
        'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
        'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
        'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
        'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
        'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'albert-base-v1': 512,
    'albert-large-v1': 512,
    'albert-xlarge-v1': 512,
    'albert-xxlarge-v1': 512,
    'albert-base-v2': 512,
    'albert-large-v2': 512,
    'albert-xlarge-v2': 512,
    'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class _UpperCAmelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        # and is kept in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
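    # Illustration of the two layouts built above (token contents hypothetical):
    #   single sequence: [CLS] A [SEP]          -> token_type_ids: 0 0 ... 0
    #   sequence pair:   [CLS] A [SEP] B [SEP]  -> token_type_ids: 0 ... 0 1 ... 1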
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 332
|
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict = defaultdict(int)
    # For each character in the input strings, adjust the
    # character counts in opposite directions
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
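# Hedged usage sketch:
# >>> check_anagrams("Silent night", "Listen thing")
# True
# >>> check_anagrams("apple", "paper")
# False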
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()
    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 332
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bloom_fast'] = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bloom'] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
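# Minimal sketch of what the sys.modules swap above buys (module path assumed):
# importing the package stays cheap, and the heavy torch-backed submodule is
# only loaded when one of its attributes is first accessed.
# import transformers.models.bloom as bloom  # nothing heavy imported yet
# model_cls = bloom.BloomModel               # modeling_bloom is loaded here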
| 119
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, 'r') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
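# Hedged example: for a labels file with one entry per line, such as
# "down\nup\n", read_txt_into_dict returns {0: "down", 1: "up"},
# i.e. line number -> first whitespace-separated token.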
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('.'):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('.'):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        full_key = '.'.join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = '.'.join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split('.')[-2]
                mapped_key = mapped_key.replace('*', layer_index)
            if "weight_g" in name:
                weight_type = 'weight_g'
            elif "weight_v" in name:
                weight_type = 'weight_v'
            elif "bias" in name:
                weight_type = 'bias'
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = 'weight'
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True, )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 119
| 1
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Pressure of an ideal gas, from P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Volume of an ideal gas, from V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
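# Worked example (values chosen for illustration): 2 mol of an ideal gas at
# 300 K confined to 0.05 m^3 exerts
#   P = nRT / V = 2 * 8.314462 * 300 / 0.05 ≈ 99_773.5 Pa,
# which pressure_of_gas_system(2, 300, 0.05) reproduces.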
if __name__ == "__main__":
from doctest import testmod
testmod()
| 79
|
'''simple docstring'''
class _UpperCAmelCase :
"""simple docstring"""
    def __init__(self, array: list[int]):
        """Precompute prefix sums so range-sum queries run in O(1)."""
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end], inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
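# Hedged usage sketch (method names restored above from the standard
# prefix-sum formulation):
# >>> ps = _UpperCAmelCase([1, 2, 3, 4])  # prefix sums: [1, 3, 6, 10]
# >>> ps.get_sum(1, 3)
# 9
# >>> ps.contains_sum(6)
# True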
if __name__ == "__main__":
import doctest
doctest.testmod()
| 79
| 1
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCAmelCase__ : List[str] =logging.get_logger('transformers.models.speecht5')
def load_weights(checkpoint, hf_model, config):
    # The left-hand assignment targets below are reconstructed from the
    # SpeechT5HifiGan module layout (conv_pre / upsampler / resblocks /
    # conv_post); treat them as an assumption, since the renaming pass
    # dropped them from this copy.
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias']
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 162
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    if is_torch_version('<', '2.0.0') or not hasattr(torch, '_dynamo'):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fpaa_wrapper=True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fpaa_wrapper:
        forward = getattr(model, 'forward')
        original_forward = model.__dict__.pop('_original_forward', None)
        if original_forward is not None:
            while hasattr(forward, '__wrapped__'):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, '_converted_to_transformer_engine', False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, '__qualname__') and not hasattr(obj, '__name__'):
        obj = getattr(obj, '__class__', obj)
    if hasattr(obj, '__qualname__'):
        return obj.__qualname__
    if hasattr(obj, '__name__'):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port=None):
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(('localhost', port)) == 0
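# Minimal usage sketch (port value illustrative): probe before binding so a
# launcher can fail fast instead of colliding with an existing rendezvous.
# if is_port_in_use(29_500):
#     raise RuntimeError("Port 29500 is already taken; pick another one.")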
| 162
| 1
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class A ( SequenceFeatureExtractor ):
    model_input_names = ['input_values', 'attention_mask']
    def __init__(self, feature_size: int = 1, sampling_rate: int = 16_000, padding_value: float = 0.0, do_normalize: bool = False, num_mel_bins: int = 80, hop_length: int = 16, win_length: int = 64, win_function: str = "hann_window", frame_signal_scale: float = 1.0, fmin: float = 80, fmax: float = 7_600, mel_floor: float = 1e-10, reduction_factor: int = 2, return_attention_mask: bool = True, **kwargs, ) -> None:
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney", )
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray, ) -> np.ndarray:
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10", )
        return log_mel_spec.T
    def __call__(self, audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs, ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def _process_audio(self, speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], is_target: bool = False, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(item, dtype=np.float32) for item in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            input_values = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            input_values = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            input_values = input_values.astype(np.float32)
        padded_inputs["input_values"] = input_values
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
| 65
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_flatten_dict(self):
        input_dict = {
            'task_specific_params': {
                'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
                'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
                'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
            }
        }
        expected_dict = {
            'task_specific_params.summarization.length_penalty': 1.0,
            'task_specific_params.summarization.max_length': 128,
            'task_specific_params.summarization.min_length': 12,
            'task_specific_params.summarization.num_beams': 4,
            'task_specific_params.summarization_cnn.length_penalty': 2.0,
            'task_specific_params.summarization_cnn.max_length': 142,
            'task_specific_params.summarization_cnn.min_length': 56,
            'task_specific_params.summarization_cnn.num_beams': 4,
            'task_specific_params.summarization_xsum.length_penalty': 1.0,
            'task_specific_params.summarization_xsum.max_length': 62,
            'task_specific_params.summarization_xsum.min_length': 11,
            'task_specific_params.summarization_xsum.num_beams': 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 90
| 0
|
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` onto the attribute of `hf_pointer` addressed by `key`."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load a single convolutional feature-extractor weight or bias."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Copy/paste/tweak the original WavLM weights into the transformers design."""
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
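# Usage sketch (script filename and paths are illustrative, not from the original source):
#
#     python convert_wavlm_checkpoint.py \
#         --checkpoint_path ./WavLM-Base.pt \
#         --pytorch_dump_folder_path ./wavlm-base-converted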
| 366
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    """Build the DetaConfig (Swin backbone + DETA head) for the given checkpoint name."""
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"],
    )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True,
    )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    """List (old, new) key pairs mapping original DETA parameter names to HF names."""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original DETA weights into the transformers design."""
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # print names and shapes of the original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
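# Toy illustration (sizes are made up, not part of the original script) of the fused-qkv
# split performed in read_in_swin_q_k_v above: timm/Swin stores query, key and value as a
# single (3*dim, dim) matrix, which is sliced into three (dim, dim) blocks for the HF layout.
#
#     import torch
#     dim = 4
#     qkv = torch.randn(3 * dim, dim)
#     q, k, v = qkv[:dim, :], qkv[dim : 2 * dim, :], qkv[-dim:, :]
#     assert torch.equal(torch.cat([q, k, v]), qkv)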
| 244
| 0
|
from scipy.stats import spearmanr
import datasets
__snake_case ="""\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"""
__snake_case ="""\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"""
__snake_case =R"""\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 4
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # seed so that the generated waveform is deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 226
| 0
|
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == str(split) if split else dataset.split == "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
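# Minimal round-trip sketch (dataset contents are illustrative, not from the tests above)
# of the writer/reader pair these tests exercise:
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
#     ParquetDatasetWriter(ds, "out.parquet").write()
#     reloaded = ParquetDatasetReader("out.parquet").read()
#     assert reloaded.column_names == ds.column_names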
| 73
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 73
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the redundant `<command> [<args>]` part of the usage line for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
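# Usage sketch (prompt strings are illustrative): _ask_field pairs a free-form prompt with
# a converter and re-asks until the conversion succeeds, while _ask_options presents a
# numbered menu and converts the selected index.
#
#     num_processes = _ask_field("How many processes? [1]: ", convert_value=int, default=1,
#                                error_message="Please enter an integer.")
#     use_cpu = _ask_field("Run on CPU only? [yes/NO]: ",
#                          convert_value=_convert_yes_no_to_bool, default=False)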
| 119
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 119
| 1
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 78
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
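# Note: `_LazyModule` defers the heavy imports registered in `_import_structure`
# until an attribute is first accessed. A minimal consumer-side sketch
# (illustrative only; assumes the standard `transformers` package layout):
#
#     from transformers import LlamaConfig  # triggers the lazy import
#     config = LlamaConfig()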
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1,
        feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320,
        num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256,
        proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False,
        use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0,
        bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
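# Worked example (illustrative): with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), `inputs_to_logits_ratio` is 5 * 2**6 = 320, i.e. the
# feature encoder emits one output frame per 320 raw audio samples, which is
# 20 ms of audio at a 16 kHz sampling rate.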
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Placeholder so that annotations below still resolve when vision is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )
        import datasets

        # dataset provides images in several modes (RGBA, LA, L) to exercise conversion
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4),
            [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4),
            [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4),
            [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__UpperCAmelCase : Tuple = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__UpperCAmelCase : Optional[Any] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    # into one entry per expert
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
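# Example of the gin lines the parser above matches (illustrative): a line such
# as "NUM_HEADS = 12" is mapped through GIN_TO_CONFIG_MAPPING to the config
# kwarg num_heads=12, while "dense.MlpBlock.activations = ('gelu',)" sets
# feed_forward_proj="gelu".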
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed.",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
    benchmark_iterating()
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    # DialoGPT checkpoints store the tied LM head under a different key than transformers expects
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_CITATION = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and union between a predicted and a ground-truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
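# Worked example (illustrative, not from the metric card): for
# pred_label = [[0, 1], [1, 1]] and label = [[0, 1], [0, 1]] with num_labels=2
# and no ignored pixels, three positions match, giving per-class intersections
# [1, 2], prediction areas [1, 3], label areas [2, 2], hence unions [2, 3] and
# per-class IoU [0.5, 0.667].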
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Aggregate intersection and union over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index,
            nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels,
        )
        return iou_result
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.distilbert.modeling_flax_distilbert import (
        FlaxDistilBertForMaskedLM,
        FlaxDistilBertForMultipleChoice,
        FlaxDistilBertForQuestionAnswering,
        FlaxDistilBertForSequenceClassification,
        FlaxDistilBertForTokenClassification,
        FlaxDistilBertModel,
    )
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_snake_case = "\nHuman: <<task>>\n\nAssistant: "
_snake_case = "huggingface-tools/default-prompts"
_snake_case = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="run" ):
'''simple docstring'''
if prompt_or_repo_id is None:
_lowerCAmelCase : str = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCamelCase ) is not None:
return prompt_or_repo_id
_lowerCAmelCase : Dict = cached_file(
_lowerCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
return f.read()
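# Usage sketch (illustrative; assumes the default repo exposes the two template
# files named in PROMPT_FILES):
#
#     template = download_prompt(None, agent_name="MyAgent", mode="chat")
#     prompt = template.replace("<<task>>", "Translate this text to French.")
#
# A string containing whitespace short-circuits the download and is treated as
# the prompt itself.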
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_snake_case)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        r"""Instantiate a [`RagConfig`] from a question encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework,
            sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
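
# For example, downscale_height_and_width(768, 768) returns (96, 96): with the
# default scale_factor=8, 768 // 8**2 == 12, and 12 * 8 == 96 is the latent
# resolution handed to the UNet. Non-multiples round up, so
# downscale_height_and_width(500, 500) returns (64, 64).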
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
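
# e.g. prepare_image(Image.open("init.png"), w=768, h=768) (the path is
# illustrative) yields a (1, 3, 768, 768) float32 tensor scaled to [-1, 1],
# ready to be concatenated into a batch in __call__ below.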

class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image generation pipeline for Kandinsky 2.2 (unet + scheduler + movq)."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
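
    # e.g. num_inference_steps=100 with strength=0.2 keeps only the last 20
    # scheduler timesteps, so denoising starts from a lightly-noised version of
    # the input image and the result stays close to it.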
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
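
# e.g. greatest_common_divisor(4, 8) == 4; the cipher below only needs this to
# check that the key determinant is coprime with the 36-character alphabet.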

class HillCipher:
    """Hill cipher over the 36-character uppercase alphanumeric alphabet."""

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(lambda x: round(x))

    def __init__(self, encrypt_key: numpy.ndarray):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            raise ValueError(
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
    def process_text(self, text: str) -> str:
        # keep only characters from the 36-char alphabet, then pad with the last
        # character until the length is a multiple of the key order
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        # brute-force search for the multiplicative inverse of det modulo 36
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
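
# A round-trip sketch (the 2x2 key is illustrative; any key whose determinant is
# coprime with 36 works):
#   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   assert hc.decrypt(hc.encrypt("testing hill cipher")) == "TESTINGHILLCIPHERR"
# process_text() uppercases, drops the space, and pads with the last character,
# so the recovered plaintext is the padded form of the input.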

def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
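
# e.g. _hash_python_lines(["x = 1  # set x", "", "print(x)"]) drops the empty
# line, strips the comment, and returns the hex sha256 digest of "x = 1  \nprint(x)".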
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")