"""Fill examples with bitext up to max_tokens without breaking up examples.
[['I went', 'yo fui'], ['to the store', 'a la tienda']]
=> ['I went to the store', 'yo fui a la tienda']
"""
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
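# A minimal usage sketch (paths and the model name below are illustrative, not
# from the source). The script greedily concatenates adjacent (source, target)
# pairs until either side would exceed max_seq_len tokens, shrinking the number
# of training examples:
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed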
import string
def UpperCamelCase (lowercase_: str ) -> None:
for key in range(len(string.ascii_uppercase ) ):
A__ : Dict = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
A__ : Dict = string.ascii_uppercase.find(lowercase_ )
A__ : Optional[int] = num - key
if num < 0:
A__ : Optional[int] = num + len(string.ascii_uppercase )
A__ : Any = translated + string.ascii_uppercase[num]
else:
A__ : Optional[Any] = translated + symbol
print(f"""Decryption using Key #{key}: {translated}""" )
def UpperCamelCase () -> None:
A__ : Optional[Any] = input("""Encrypted message: """ )
A__ : Optional[Any] = message.upper()
decrypt(lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
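# Worked example: "OLSSV" is "HELLO" shifted forward by 7, so among the 26
# candidate lines printed by decrypt("OLSSV") appears:
#
#   Decryption using Key #7: HELLO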
def solution() -> int:
    """
    Returns the product a * b * c of the Pythagorean triplet with a + b + c = 1000.
    Since c = 1000 - a - b, it suffices to search over a and b.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
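# Sanity check of the arithmetic: the unique triplet is (200, 375, 425), since
# 200 + 375 + 425 = 1000 and 200**2 + 375**2 = 40000 + 140625 = 180625 = 425**2,
# so solution() returns 200 * 375 * 425 = 31875000.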
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" MVP tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
    tokenizer, using byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """
        `str`: Mask token. Logs an error and returns None if used before being set.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
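# A minimal usage sketch (checkpoint name taken from the map above; exact token
# ids depend on the vocabulary files):
#
# from transformers import MvpTokenizerFast
# tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
# enc = tokenizer("Hello world")
# # enc["input_ids"] starts with the <s> id and ends with the </s> id,
# # matching build_inputs_with_special_tokens above.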
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
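# A minimal usage sketch (assuming the surrounding `datasets` package context):
# once this formatter is registered, `Dataset.with_format("jax")` returns rows,
# columns, and batches as `jax.Array` values via the methods above.
#
# import datasets
# ds = datasets.Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
# ds[0]["x"]  # -> jax.Array of dtype float32, per `_tensorize`'s default dtype logic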
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the timm model's weights to our ViT hybrid structure.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
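# A typical invocation of this conversion script (the output path is
# illustrative, not from the source):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384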
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """
    Returns the integer product of denominator / numerator over all
    digit-cancelling fractions with `n`-digit numerators.
    """
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
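# Worked example of a digit-cancelling fraction: 49/98 = 0.5, and naively
# "cancelling" the shared digit 9 gives 4/8 = 0.5, so is_digit_cancelling(49, 98)
# is True. The four non-trivial fractions are 16/64, 19/95, 26/65 and 49/98;
# their product is 1/100, hence solution() returns 100.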
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Helps in renaming the embedding layer weights of stage `idx`."""
    embed = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    """Helps in renaming attention block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    """Helps in renaming the cls_token weights."""
    token = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def final():
    """Helps in renaming the final classification-head layers."""
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """
    Function to convert the microsoft cvt checkpoint to huggingface checkpoint
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
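# A typical invocation of this conversion script (the checkpoint path mirrors
# the default above; the output path is illustrative):
#
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-384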
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
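# A minimal usage sketch (checkpoint name taken from the map above):
#
# from transformers import HerbertTokenizerFast
# tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
# enc = tokenizer("Kto ma lepszą sztukę, ma lepszy rząd")
# # enc["input_ids"] starts with the <s> id and ends with the </s> id,
# # matching build_inputs_with_special_tokens above.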
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """
    Return a min-max normalized copy of `data`, rounded to `ndigits` places.
    """
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """
    Return a z-score standardized copy of `data`, rounded to `ndigits` places.
    """
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
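# Worked examples:
# normalization([2, 4, 6]) rescales to [0.0, 0.5, 1.0] since x_min=2 and x_max=6.
# standardization([2, 4, 6]) gives [-1.0, 0.0, 1.0]: the mean is 4 and the
# sample standard deviation is 2.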
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags: List[str] = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            # map unknown residues to "X" (strings are immutable, so rebuild the sequence)
            seq = "".join(res if res in residue_constants.restypes else "X" for res in seq)
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes the mask of heavy atoms that should be present for the given sequence."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a protein from a prediction."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
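# A minimal usage sketch (array contents are illustrative; shapes follow the
# field comments in the Protein dataclass above):
#
# result = {"final_atom_positions": ..., "final_atom_mask": ...}  # model output
# features = {"aatype": ..., "residue_index": ...}                # model input
# prot = from_prediction(features, result)
# pdb_string = to_pdb(prot)  # columnar PDB text, one ATOM record per unmasked atom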
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """
    Applies a warmup schedule on a given learning rate decay schedule.
    """

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: float = None,
    adam_global_clipnorm: float = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: List[str] = None,
):
    """
    Creates an optimizer with a learning rate schedule: a warmup phase followed by polynomial decay.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """
    Adam that additionally decays weights towards zero (decoupled weight decay),
    with regex-based include/exclude lists for which parameters to decay.
    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator(object):
    '''simple docstring'''
    def __init__( self ):
        """simple docstring"""
        self._gradients = []
        self._accum_steps = None
    @property
    def step( self ):
        """simple docstring"""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()
    @property
    def gradients( self ):
        """simple docstring"""
        if not self._gradients:
            raise ValueError("""The accumulator should be called first to initialize the gradients""" )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__( self , gradients ):
        """simple docstring"""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(f'''Expected {len(self._gradients )} gradients, but got {len(gradients )}''' )
        for accum_gradient, gradient in zip(self._gradients , gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )
    def reset( self ):
        """simple docstring"""
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
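# --- Illustrative sketch (added; not part of the original file). It shows how
# the accumulator above is typically driven in a custom training loop.
# `model`, `loss_fn`, `dataset`, `optimizer` and `accum_steps` are hypothetical
# placeholders; only the GradientAccumulator API defined above
# (__call__ / gradients / reset) is assumed.
#
# accumulator = GradientAccumulator()
# for step, batch in enumerate(dataset):
#     with tf.GradientTape() as tape:
#         loss = loss_fn(model(batch))
#     accumulator(tape.gradient(loss, model.trainable_variables))
#     if (step + 1) % accum_steps == 0:
#         optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#         accumulator.reset()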
| 56
| 0
|
'''simple docstring'''
def valid_coloring(neighbours: list[int] , colored_vertices: list[int] , color: int ) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color(graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ) -> bool:
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]] , max_colors: int ) -> list[int]:
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
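# --- Illustrative usage (added; not part of the original snippet): 3-coloring
# a 5-vertex graph given as an adjacency matrix, the convention that
# valid_coloring() above expects.
#
# graph = [
#     [0, 1, 0, 0, 0],
#     [1, 0, 1, 0, 1],
#     [0, 1, 0, 1, 0],
#     [0, 0, 1, 0, 1],
#     [0, 1, 0, 1, 0],
# ]
# print(color(graph, 3))  # -> [0, 1, 0, 1, 0]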
| 35
|
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs ):
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class PairClassificationPipeline(Pipeline ):
    """simple docstring"""
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["""second_text"""]
        return preprocess_kwargs, {}, {}
    def preprocess( self , text , second_text=None ):
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
    def _forward( self , model_inputs ):
        return self.model(**model_inputs )
    def postprocess( self , model_outputs ):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
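# --- Illustrative registration (added; a hedged sketch following the
# custom-pipeline pattern documented for transformers, not part of the
# original file). The task name "pair-classification" is an arbitrary choice.
#
# from transformers.pipelines import PIPELINE_REGISTRY
# from transformers import AutoModelForSequenceClassification
# PIPELINE_REGISTRY.register_pipeline(
#     "pair-classification",
#     pipeline_class=PairClassificationPipeline,
#     pt_model=AutoModelForSequenceClassification,
# )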
| 35
| 1
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'OwlViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , query_images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        '''simple docstring'''
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
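# --- Illustrative usage (added; a hedged sketch, not part of the original
# file). It assumes the upstream "google/owlvit-base-patch32" checkpoint name
# and a hypothetical PIL image `image`; the nested text list follows the
# per-image query convention handled by __call__ above.
#
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                    images=image, return_tensors="pt")
# # inputs now holds input_ids, attention_mask and pixel_values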
| 163
|
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor , drop_prob: float = 0.0 , training: bool = False ) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob ) * random_tensor
    return output
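# Illustrative sanity check (added): stochastic depth keeps the expected
# activation unchanged because surviving samples are rescaled by 1/keep_prob.
# With x = torch.ones(10000, 8) and drop_prob=0.5, drop_path(x, 0.5, True)
# zeroes roughly half the rows and doubles the rest, so the mean stays ~1.0.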
class PoolFormerDropPath(nn.Module):
    def __init__( self , drop_prob: Optional[float] = None ) -> None:
        '''simple docstring'''
        super().__init__()
        self.drop_prob = drop_prob
    def forward( self , hidden_states: torch.Tensor ) -> torch.Tensor:
        '''simple docstring'''
        return drop_path(hidden_states , self.drop_prob , self.training )
    def extra_repr( self ) -> str:
        '''simple docstring'''
        return "p={}".format(self.drop_prob )
class PoolFormerEmbeddings(nn.Module):
    def __init__( self , hidden_size , num_channels , patch_size , stride , padding , norm_layer=None ):
        '''simple docstring'''
        super().__init__()
        patch_size = patch_size if isinstance(patch_size , collections.abc.Iterable ) else (patch_size, patch_size)
        stride = stride if isinstance(stride , collections.abc.Iterable ) else (stride, stride)
        padding = padding if isinstance(padding , collections.abc.Iterable ) else (padding, padding)
        self.projection = nn.Conv2d(num_channels , hidden_size , kernel_size=patch_size , stride=stride , padding=padding )
        self.norm = norm_layer(hidden_size ) if norm_layer else nn.Identity()
    def forward( self , pixel_values ):
        '''simple docstring'''
        embeddings = self.projection(pixel_values )
        embeddings = self.norm(embeddings )
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    def __init__( self , num_channels , **kwargs ):
        '''simple docstring'''
        super().__init__(1 , num_channels , **kwargs )
class PoolFormerPooling(nn.Module):
    def __init__( self , pool_size ):
        '''simple docstring'''
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size , stride=1 , padding=pool_size // 2 , count_include_pad=False )
    def forward( self , hidden_states ):
        '''simple docstring'''
        return self.pool(hidden_states ) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__( self , config , dropout_prob , hidden_size , intermediate_size ):
        '''simple docstring'''
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size , intermediate_size , 1 )
        self.conv2 = nn.Conv2d(intermediate_size , hidden_size , 1 )
        self.drop = PoolFormerDropPath(dropout_prob )
        if isinstance(config.hidden_act , str ):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act
    def forward( self , hidden_states ):
        '''simple docstring'''
        hidden_states = self.conv1(hidden_states )
        hidden_states = self.act_fn(hidden_states )
        hidden_states = self.drop(hidden_states )
        hidden_states = self.conv2(hidden_states )
        hidden_states = self.drop(hidden_states )
        return hidden_states
class PoolFormerLayer(nn.Module):
    def __init__( self , config , num_channels , pool_size , hidden_size , intermediate_size , drop_path ):
        '''simple docstring'''
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size )
        self.output = PoolFormerOutput(config , drop_path , hidden_size , intermediate_size )
        self.before_norm = PoolFormerGroupNorm(num_channels )
        self.after_norm = PoolFormerGroupNorm(num_channels )
        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path ) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
    def forward( self , hidden_states ):
        '''simple docstring'''
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states ) )
            scaled_op = self.layer_scale_1.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op )
            outputs = ()
            layer_output = self.output(self.after_norm(hidden_states ) )
            scaled_op = self.layer_scale_2.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op )
            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states ) ) )
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()
            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states ) ) )
            output = hidden_states + layer_output
            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        self.patch_embeddings = nn.ModuleList(embeddings )
        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        config , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(layers ) )
        self.block = nn.ModuleList(blocks )
    def forward( self , pixel_values , output_hidden_states=False , return_dict=True ):
        '''simple docstring'''
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            embedding_layer , block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states )
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer ):
                layer_outputs = blk(hidden_states )
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states , hidden_states=all_hidden_states )
class PoolFormerPreTrainedModel(PreTrainedModel):
    config_class = PoolFormerConfig
    base_model_prefix = 'poolformer'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        '''simple docstring'''
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
    def _set_gradient_checkpointing( self , module , value=False ):
        '''simple docstring'''
        if isinstance(module , PoolFormerEncoder ):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
POOLFORMER_INPUTS_DOCSTRING = r"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , POOLFORMER_START_DOCSTRING , )
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.config = config
        self.encoder = PoolFormerEncoder(config )
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings( self ):
        '''simple docstring'''
        return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values: Optional[torch.FloatTensor] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values" )
        encoder_outputs = self.encoder(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output , hidden_states=encoder_outputs.hidden_states , )
class PoolFormerFinalPooler(nn.Module):
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__()
        self.dense = nn.Linear(config.hidden_size , config.hidden_size )
    def forward( self , hidden_states ):
        '''simple docstring'''
        output = self.dense(hidden_states )
        return output
@add_start_docstrings(
    '\n    PoolFormer Model transformer with an image classification head on top\n    ' , POOLFORMER_START_DOCSTRING , )
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config )
        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values: Optional[torch.FloatTensor] = None , labels: Optional[torch.LongTensor] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
        sequence_output = outputs[0]
        logits = self.classifier(self.norm(sequence_output ).mean([-2, -1] ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
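# --- Illustrative usage (added; a hedged sketch, not part of the original
# file). `image` is a hypothetical PIL image; the checkpoint name matches the
# _IMAGE_CLASS_CHECKPOINT constant defined above.
#
# from transformers import AutoImageProcessor
# image_processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
# model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
# inputs = image_processor(image, return_tensors="pt")
# predicted_class = model(**inputs).logits.argmax(-1).item()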
| 163
| 1
|
def perfect_cube(n ):
    """simple docstring"""
    val = round(n ** (1 / 3) )  # round to avoid floating-point error in the cube root
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 110
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase ):
    '''simple docstring'''
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut( self ):
        config = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
        self.assertIsInstance(config , CLIPImageProcessor )
    def test_image_processor_from_local_directory_from_key( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname ) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(config_tmpfile , 'w' ) )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
    def test_image_processor_from_local_directory_from_feature_extractor_key( self ):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname ) / 'config.json'
            json.dump(
                {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(config_tmpfile , 'w' ) )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
    def test_image_processor_from_local_directory_from_config( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname ) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(config_tmpfile , 'w' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname ).to_dict()
            config_dict.pop('image_processor_type' )
            config = CLIPImageProcessor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
    def test_image_processor_from_local_file( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
            image_processor = AutoImageProcessor.from_pretrained(processor_tmpfile )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
    def test_repo_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , 'clip-base is not a local folder and is not a valid model identifier' ):
            image_processor = AutoImageProcessor.from_pretrained('clip-base' )
    def test_revision_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            image_processor = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )
    def test_image_processor_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
    def test_from_pretrained_dynamic_image_processor( self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=False )
        image_processor = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=True )
        self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir )
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
    def test_new_image_processor_registration( self ):
        try:
            AutoConfig.register('custom' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , CustomImageProcessor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoImageProcessor.register(CLIPConfig , CLIPImageProcessor )
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
                config_tmpfile = Path(tmpdirname ) / 'config.json'
                json.dump(
                    {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
                json.dump({'model_type': 'clip'} , open(config_tmpfile , 'w' ) )
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname )
                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir )
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir )
                    self.assertIsInstance(new_image_processor , CustomImageProcessor )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict( self ):
        class NewImageProcessor(CLIPImageProcessor ):
            '''simple docstring'''
            is_local = True
        try:
            AutoConfig.register('custom' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , NewImageProcessor )
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(not hasattr(image_processor , 'is_local' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 225
| 0
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = '''wavlm'''
    def __init__( self ,vocab_size=32 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout=0.1 ,activation_dropout=0.1 ,attention_dropout=0.1 ,feat_proj_dropout=0.0 ,final_dropout=0.1 ,layerdrop=0.1 ,initializer_range=0.02 ,layer_norm_eps=1E-5 ,feat_extract_norm="group" ,feat_extract_activation="gelu" ,conv_dim=(512, 512, 512, 512, 512, 512, 512) ,conv_stride=(5, 2, 2, 2, 2, 2, 2) ,conv_kernel=(10, 3, 3, 3, 3, 2, 2) ,conv_bias=False ,num_conv_pos_embeddings=128 ,num_conv_pos_embedding_groups=16 ,num_buckets=320 ,max_bucket_distance=800 ,do_stable_layer_norm=False ,apply_spec_augment=True ,mask_time_prob=0.05 ,mask_time_length=10 ,mask_time_min_masks=2 ,mask_feature_prob=0.0 ,mask_feature_length=10 ,num_codevectors_per_group=320 ,num_codevector_groups=2 ,contrastive_logits_temperature=0.1 ,num_negatives=100 ,codevector_dim=256 ,proj_codevector_dim=256 ,diversity_loss_weight=0.1 ,ctc_loss_reduction="mean" ,ctc_zero_infinity=False ,use_weighted_layer_sum=False ,classifier_proj_size=256 ,tdnn_dim=(512, 512, 512, 512, 1500) ,tdnn_kernel=(5, 3, 3, 1, 1) ,tdnn_dilation=(1, 2, 3, 1, 1) ,xvector_output_dim=512 ,num_ctc_classes=80 ,pad_token_id=0 ,bos_token_id=1 ,eos_token_id=2 ,add_adapter=False ,adapter_kernel_size=3 ,adapter_stride=2 ,num_adapter_layers=3 ,output_hidden_size=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(**kwargs ,pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        '''simple docstring'''
        return functools.reduce(operator.mul ,self.conv_stride ,1 )
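# Illustrative check (added): with the default conv_stride (5, 2, 2, 2, 2, 2, 2)
# the property above returns 5 * 2**6 = 320, i.e. the feature encoder
# downsamples raw audio by 320 samples per output frame.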
| 5
|
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int ) -> int:
    """simple docstring"""
    if num < 0:
        raise ValueError('Number should not be negative.' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
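# Illustrative behaviour (added): @lru_cache memoises every return value, so
# after factorial(10) has run, factorial(11) performs a single multiplication
# instead of recursing all the way down. For example, factorial(5) == 120.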
| 5
| 1
|
from pathlib import Path
import fire
def minify(src_dir: str , dest_dir: str , n: int ):
    """Write first n lines of each file of src_dir into dest_dir."""
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open('''w''' ).write('''\n'''.join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
| 186
|
def simplify(current_set ):
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude = row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant = simplify(next_iteration )
        for i in range(len(resultant ) ):
            resultant[i].insert(0 , current_first_column[i] )
        resultant.insert(0 , current_first_row )
        final_set = resultant
    return final_set
def solve_simultaneous(equations ):
    if len(equations ) == 0:
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
    _length = len(equations ) + 1
    if any(len(item ) != _length for item in equations ):
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
    for row in equations:
        if any(not isinstance(column , (int, float) ) for column in row ):
            raise ValueError('''solve_simultaneous() requires lists of integers''' )
    if len(equations ) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set ):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data ):
            if 0 not in row:
                full_row = data_set.pop(row_index )
                break
        if not full_row:
            raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
        data_set.insert(0 , full_row )
    useable_form = data_set.copy()
    simplified = simplify(useable_form )
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        temp_row = row.copy()[: len(row ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(temp_row ) == 0:
            solutions.append(0 )
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution )
    final = []
    for item in solutions:
        final.append(float(round(item , 5 ) ) )
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
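# Expected output (added, derived by hand): each equation of the 5x5 system is
# x_i + sum(x) = rhs_i with sum(rhs) = 30, so sum(x) = 5 and the solver prints
# [-1.0, 0.0, 1.0, 2.0, 3.0]; the single equation 4x = 2 prints [0.5].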
| 186
| 1
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict ):
    '''simple docstring'''
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict ,codebook_state_dict ):
    '''simple docstring'''
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" )
        key = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" )
        key = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" )
        key = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" )
        key = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" )
        key = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" )
        key = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" )
        key = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" )
        key = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" )
        key = key.replace("""image_encoder.module""" ,"""flava.image_model""" )
        key = key.replace("""text_encoder.module""" ,"""flava.text_model""" )
        key = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" )
        key = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" )
        key = key.replace("""text_projection""" ,"""flava.text_projection""" )
        key = key.replace("""image_projection""" ,"""flava.image_projection""" )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[F"""image_codebook.{key}"""] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path ,codebook_path ,pytorch_dump_folder_path ,config_path=None ):
    '''simple docstring'''
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path ,None ,save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path ,map_location="""cpu""" )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path ,map_location="""cpu""" )
    hf_state_dict = upgrade_state_dict(state_dict ,codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count ,state_dict_count ,atol=1E-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
a__ : Tuple = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 370
|
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders( self ):
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_2 = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 243
| 0
|
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def topological_sort(start , visited , sort ):
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort('a' , [] , [] )
    print(sort )
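# Expected output (added, traced by hand): the DFS from 'a' finishes c first,
# then b's children d and e, then b, and finally a itself, so the program
# prints ['c', 'd', 'e', 'b', 'a'].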
| 201
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a : Dict = logging.get_logger(__name__)
a : List[str] = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class a ( _lowerCamelCase ):
snake_case_ = "marian"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : List[Any] , lowercase_ : Optional[Any]=5_8101 , lowercase_ : Dict=None , lowercase_ : List[str]=1024 , lowercase_ : Optional[Any]=12 , lowercase_ : int=4096 , lowercase_ : Any=16 , lowercase_ : Optional[int]=12 , lowercase_ : str=4096 , lowercase_ : Union[str, Any]=16 , lowercase_ : Dict=0.0 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Optional[Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : int="gelu" , lowercase_ : Dict=1024 , lowercase_ : int=0.1 , lowercase_ : Tuple=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Tuple=0.02 , lowercase_ : int=5_8100 , lowercase_ : Optional[Any]=False , lowercase_ : Any=5_8100 , lowercase_ : Optional[int]=0 , lowercase_ : Dict=0 , lowercase_ : List[str]=True , **lowercase_ : Any , ):
snake_case_ = vocab_size
snake_case_ = decoder_vocab_size or vocab_size
snake_case_ = max_position_embeddings
snake_case_ = d_model
snake_case_ = encoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = encoder_attention_heads
snake_case_ = decoder_ffn_dim
snake_case_ = decoder_layers
snake_case_ = decoder_attention_heads
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = use_cache
snake_case_ = encoder_layers
snake_case_ = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case_ = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , )
class a ( _lowerCamelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def A_ ( self : Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
snake_case_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
snake_case_ = {0: '''batch'''}
snake_case_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
snake_case_ = {0: '''batch''', 1: '''decoder_sequence'''}
snake_case_ = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
snake_case_ ,snake_case_ = self.num_layers
for i in range(lowercase_ ):
snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
snake_case_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def A_ ( self : Dict ):
if self.task in ["default", "seq2seq-lm"]:
snake_case_ = super().outputs
else:
snake_case_ = super(lowercase_ , self ).outputs
if self.use_past:
snake_case_ ,snake_case_ = self.num_layers
for i in range(lowercase_ ):
snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def A_ ( self : Dict , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
snake_case_ = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Generate decoder inputs
snake_case_ = seq_length if not self.use_past else 1
snake_case_ = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
snake_case_ = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
snake_case_ = dict(**lowercase_ , **lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
snake_case_ ,snake_case_ = common_inputs['''input_ids'''].shape
snake_case_ = common_inputs['''decoder_input_ids'''].shape[1]
snake_case_ ,snake_case_ = self.num_attention_heads
snake_case_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case_ = decoder_seq_length + 3
snake_case_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case_ = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase_ , lowercase_ )] , dim=1 )
snake_case_ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case_ ,snake_case_ = self.num_layers
snake_case_ = min(lowercase_ , lowercase_ )
snake_case_ = max(lowercase_ , lowercase_ ) - min_num_layers
snake_case_ = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
) )
# TODO: test this.
snake_case_ = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase_ , lowercase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) )
return common_inputs
def A_ ( self : Union[str, Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
snake_case_ = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
snake_case_ ,snake_case_ = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
snake_case_ = seqlen + 2
snake_case_ ,snake_case_ = self.num_layers
snake_case_ ,snake_case_ = self.num_attention_heads
snake_case_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case_ = common_inputs['''attention_mask'''].dtype
snake_case_ = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 )
snake_case_ = [
(torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ )
]
return common_inputs
def A_ ( self : List[str] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case_ = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case_ = tokenizer.num_special_tokens_to_add(lowercase_ )
snake_case_ = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
# Generate dummy inputs according to compute batch and sequence
snake_case_ = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case_ = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) )
return common_inputs
def A_ ( self : Any , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
snake_case_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
else:
snake_case_ = self._generate_dummy_inputs_for_causal_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
return common_inputs
def A_ ( self : Dict , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : List[str] ):
if self.task in ["default", "seq2seq-lm"]:
snake_case_ = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
else:
snake_case_ = super(lowercase_ , self )._flatten_past_key_values_(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
@property
def A_ ( self : List[str] ):
return 1e-4
| 56
| 0
|
'''simple docstring'''
def solution(limit: int = 1_000_000 ) -> int:
    '''simple docstring'''
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(f"{solution() = }")
| 106
|
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: List[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ='Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path: str , pytorch_dump_folder_path: str , classification_head: bool ) -> Any:
    '''simple docstring'''
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
UpperCAmelCase_ = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
UpperCAmelCase_ = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" , snake_case_ )
UpperCAmelCase_ = XLMRobertaXLForSequenceClassification(snake_case_ ) if classification_head else XLMRobertaXLForMaskedLM(snake_case_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase_ = roberta_sent_encoder.embed_tokens.weight
UpperCAmelCase_ = roberta_sent_encoder.embed_positions.weight
UpperCAmelCase_ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
UpperCAmelCase_ = roberta_sent_encoder.layer_norm.weight
UpperCAmelCase_ = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase_ = model.roberta.encoder.layer[i]
UpperCAmelCase_ = roberta_sent_encoder.layers[i]
UpperCAmelCase_ = layer.attention
UpperCAmelCase_ = roberta_layer.self_attn_layer_norm.weight
UpperCAmelCase_ = roberta_layer.self_attn_layer_norm.bias
# self attention
UpperCAmelCase_ = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
UpperCAmelCase_ = roberta_layer.self_attn.q_proj.weight
UpperCAmelCase_ = roberta_layer.self_attn.q_proj.bias
UpperCAmelCase_ = roberta_layer.self_attn.k_proj.weight
UpperCAmelCase_ = roberta_layer.self_attn.k_proj.bias
UpperCAmelCase_ = roberta_layer.self_attn.v_proj.weight
UpperCAmelCase_ = roberta_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase_ = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
UpperCAmelCase_ = roberta_layer.self_attn.out_proj.weight
UpperCAmelCase_ = roberta_layer.self_attn.out_proj.bias
        # this one is the final layer norm
UpperCAmelCase_ = roberta_layer.final_layer_norm.weight
UpperCAmelCase_ = roberta_layer.final_layer_norm.bias
# intermediate
UpperCAmelCase_ = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        UpperCAmelCase_ = roberta_layer.fc1.weight
        UpperCAmelCase_ = roberta_layer.fc1.bias
# output
UpperCAmelCase_ = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        UpperCAmelCase_ = roberta_layer.fc2.weight
        UpperCAmelCase_ = roberta_layer.fc2.bias
# end of layer
if classification_head:
UpperCAmelCase_ = roberta.model.classification_heads["mnli"].dense.weight
UpperCAmelCase_ = roberta.model.classification_heads["mnli"].dense.bias
UpperCAmelCase_ = roberta.model.classification_heads["mnli"].out_proj.weight
UpperCAmelCase_ = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
UpperCAmelCase_ = roberta.model.encoder.lm_head.dense.weight
UpperCAmelCase_ = roberta.model.encoder.lm_head.dense.bias
UpperCAmelCase_ = roberta.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase_ = roberta.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase_ = roberta.model.encoder.lm_head.weight
UpperCAmelCase_ = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCAmelCase_ = roberta.encode(snake_case_ ).unsqueeze(0 ) # batch of size 1
UpperCAmelCase_ = model(snake_case_ )[0]
if classification_head:
UpperCAmelCase_ = roberta.model.classification_heads["mnli"](roberta.extract_features(snake_case_ ) )
else:
UpperCAmelCase_ = roberta.model(snake_case_ )[0]
print(our_output.shape , their_output.shape )
UpperCAmelCase_ = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
UpperCAmelCase_ = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(snake_case_ ).mkdir(parents=snake_case_ , exist_ok=snake_case_ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
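# A generic sketch (standalone, not part of the conversion script above) of
# the output-equivalence check it performs: run both models on the same
# input, report the worst-case deviation, then apply an absolute tolerance.
import torch

def outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> bool:
    max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")
    return torch.allclose(ours, theirs, atol=atol)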
| 106
| 1
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _snake_case ( a__ , a__ ):
@register_to_config
def __init__( self , _lowerCamelCase = 128 , _lowerCamelCase = 256 , _lowerCamelCase = 2000.0 , _lowerCamelCase = 768 , _lowerCamelCase = 12 , _lowerCamelCase = 12 , _lowerCamelCase = 64 , _lowerCamelCase = 2048 , _lowerCamelCase = 0.1 , ):
super().__init__()
UpperCAmelCase__ : Optional[int] = nn.Sequential(
nn.Linear(_lowerCamelCase , d_model * 4 , bias=_lowerCamelCase) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_lowerCamelCase) , nn.SiLU() , )
UpperCAmelCase__ : Optional[Any] = nn.Embedding(_lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : str = False
UpperCAmelCase__ : str = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase)
UpperCAmelCase__ : int = nn.Dropout(p=_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = nn.ModuleList()
for lyr_num in range(_lowerCamelCase):
# FiLM conditional T5 decoder
UpperCAmelCase__ : Union[str, Any] = DecoderLayer(d_model=_lowerCamelCase , d_kv=_lowerCamelCase , num_heads=_lowerCamelCase , d_ff=_lowerCamelCase , dropout_rate=_lowerCamelCase)
self.decoders.append(_lowerCamelCase)
UpperCAmelCase__ : str = TaLayerNorm(_lowerCamelCase)
UpperCAmelCase__ : List[str] = nn.Dropout(p=_lowerCamelCase)
UpperCAmelCase__ : str = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : int = torch.mul(query_input.unsqueeze(-1) , key_input.unsqueeze(-2))
return mask.unsqueeze(-3)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
UpperCAmelCase__ : Optional[int] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype)
UpperCAmelCase__ : Dict = self.conditioning_emb(_lowerCamelCase).unsqueeze(1)
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
UpperCAmelCase__ : int = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
UpperCAmelCase__ : Optional[Any] = torch.broadcast_to(
torch.arange(_lowerCamelCase , device=decoder_input_tokens.device) , (batch, seq_length) , )
UpperCAmelCase__ : Optional[int] = self.position_encoding(_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = self.continuous_inputs_projection(_lowerCamelCase)
inputs += position_encodings
UpperCAmelCase__ : List[Any] = self.dropout(_lowerCamelCase)
# decoder: No padding present.
UpperCAmelCase__ : Tuple = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype)
# Translate encoding masks to encoder-decoder masks.
UpperCAmelCase__ : int = [(x, self.encoder_decoder_mask(_lowerCamelCase , _lowerCamelCase)) for x, y in encodings_and_masks]
# cross attend style: concat encodings
UpperCAmelCase__ : Optional[int] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1)
UpperCAmelCase__ : Optional[int] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1)
for lyr in self.decoders:
UpperCAmelCase__ : List[str] = lyr(
_lowerCamelCase , conditioning_emb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , )[0]
UpperCAmelCase__ : List[Any] = self.decoder_norm(_lowerCamelCase)
UpperCAmelCase__ : List[Any] = self.post_dropout(_lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = self.spec_out(_lowerCamelCase)
return spec_out
class _snake_case ( nn.Module ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1e-6):
super().__init__()
UpperCAmelCase__ : Tuple = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_lowerCamelCase , d_kv=_lowerCamelCase , num_heads=_lowerCamelCase , dropout_rate=_lowerCamelCase))
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_lowerCamelCase , d_kv=_lowerCamelCase , num_heads=_lowerCamelCase , dropout_rate=_lowerCamelCase , layer_norm_epsilon=_lowerCamelCase , ))
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_lowerCamelCase , d_ff=_lowerCamelCase , dropout_rate=_lowerCamelCase , layer_norm_epsilon=_lowerCamelCase))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , ):
UpperCAmelCase__ : Optional[Any] = self.layer[0](
_lowerCamelCase , conditioning_emb=_lowerCamelCase , attention_mask=_lowerCamelCase , )
if encoder_hidden_states is not None:
UpperCAmelCase__ : List[str] = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0).to(
encoder_hidden_states.dtype)
UpperCAmelCase__ : int = self.layer[1](
_lowerCamelCase , key_value_states=_lowerCamelCase , attention_mask=_lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
UpperCAmelCase__ : Optional[Any] = self.layer[-1](_lowerCamelCase , _lowerCamelCase)
return (hidden_states,)
class _snake_case ( nn.Module ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
super().__init__()
UpperCAmelCase__ : List[Any] = TaLayerNorm(_lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowerCamelCase)
UpperCAmelCase__ : Tuple = Attention(query_dim=_lowerCamelCase , heads=_lowerCamelCase , dim_head=_lowerCamelCase , out_bias=_lowerCamelCase , scale_qk=_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = nn.Dropout(_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , ):
# pre_self_attention_layer_norm
UpperCAmelCase__ : Tuple = self.layer_norm(_lowerCamelCase)
if conditioning_emb is not None:
UpperCAmelCase__ : str = self.FiLMLayer(_lowerCamelCase , _lowerCamelCase)
# Self-attention block
UpperCAmelCase__ : List[str] = self.attention(_lowerCamelCase)
UpperCAmelCase__ : str = hidden_states + self.dropout(_lowerCamelCase)
return hidden_states
class _snake_case ( nn.Module ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
super().__init__()
UpperCAmelCase__ : str = Attention(query_dim=_lowerCamelCase , heads=_lowerCamelCase , dim_head=_lowerCamelCase , out_bias=_lowerCamelCase , scale_qk=_lowerCamelCase)
UpperCAmelCase__ : int = TaLayerNorm(_lowerCamelCase , eps=_lowerCamelCase)
UpperCAmelCase__ : Tuple = nn.Dropout(_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , ):
UpperCAmelCase__ : int = self.layer_norm(_lowerCamelCase)
UpperCAmelCase__ : str = self.attention(
_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , attention_mask=attention_mask.squeeze(1) , )
UpperCAmelCase__ : Tuple = hidden_states + self.dropout(_lowerCamelCase)
return layer_output
class _snake_case ( nn.Module ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
super().__init__()
UpperCAmelCase__ : Tuple = TaDenseGatedActDense(d_model=_lowerCamelCase , d_ff=_lowerCamelCase , dropout_rate=_lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = TaLayerNorm(_lowerCamelCase , eps=_lowerCamelCase)
UpperCAmelCase__ : Dict = nn.Dropout(_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=None):
UpperCAmelCase__ : Optional[int] = self.layer_norm(_lowerCamelCase)
if conditioning_emb is not None:
UpperCAmelCase__ : List[str] = self.film(_lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = self.DenseReluDense(_lowerCamelCase)
UpperCAmelCase__ : List[str] = hidden_states + self.dropout(_lowerCamelCase)
return hidden_states
class _snake_case ( nn.Module ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
super().__init__()
UpperCAmelCase__ : List[str] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase)
UpperCAmelCase__ : str = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase)
UpperCAmelCase__ : int = nn.Dropout(_lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = NewGELUActivation()
def snake_case__ ( self , _lowerCamelCase):
        UpperCAmelCase__ : Tuple = self.act(self.wi_0(_lowerCamelCase))
        UpperCAmelCase__ : Dict = self.wi_1(_lowerCamelCase)
UpperCAmelCase__ : Tuple = hidden_gelu * hidden_linear
UpperCAmelCase__ : List[Any] = self.dropout(_lowerCamelCase)
UpperCAmelCase__ : List[Any] = self.wo(_lowerCamelCase)
return hidden_states
class _snake_case ( nn.Module ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=1e-6):
super().__init__()
UpperCAmelCase__ : Tuple = nn.Parameter(torch.ones(_lowerCamelCase))
UpperCAmelCase__ : str = eps
def snake_case__ ( self , _lowerCamelCase):
        # T5 uses a layer_norm which only scales and doesn't shift, also known as Root Mean
        # Square Layer Normalization (https://arxiv.org/abs/1910.07467); thus the variance is
        # calculated without the mean and there is no bias. Additionally, we make sure that the
        # accumulation for half-precision inputs is done in fp32
UpperCAmelCase__ : Union[str, Any] = hidden_states.to(torch.floataa).pow(2).mean(-1 , keepdim=_lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
UpperCAmelCase__ : int = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
class _snake_case ( nn.Module ):
def snake_case__ ( self , _lowerCamelCase):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(_lowerCamelCase , 3.0))))
class _snake_case ( nn.Module ):
def __init__( self , _lowerCamelCase , _lowerCamelCase):
super().__init__()
UpperCAmelCase__ : List[Any] = nn.Linear(_lowerCamelCase , out_features * 2 , bias=_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : List[Any] = self.scale_bias(_lowerCamelCase)
UpperCAmelCase__ , UpperCAmelCase__ : Any = torch.chunk(_lowerCamelCase , 2 , -1)
UpperCAmelCase__ : int = x * (1 + scale) + shift
return x
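# Minimal standalone FiLM (feature-wise linear modulation) sketch mirroring
# the last layer above: a conditioning vector is projected to per-feature
# (scale, shift) pairs and applied as x * (1 + scale) + shift.
import torch
from torch import nn

class FiLM(nn.Module):
    def __init__(self, cond_dim: int, features: int):
        super().__init__()
        self.proj = nn.Linear(cond_dim, features * 2, bias=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.proj(cond), 2, dim=-1)
        return x * (1 + scale) + shift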
| 163
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _snake_case ( a__ ):
lowerCAmelCase :Optional[int] = '''pegasus'''
lowerCAmelCase :Optional[int] = ['''past_key_values''']
lowerCAmelCase :str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , _lowerCamelCase=5_0265 , _lowerCamelCase=1024 , _lowerCamelCase=12 , _lowerCamelCase=4096 , _lowerCamelCase=16 , _lowerCamelCase=12 , _lowerCamelCase=4096 , _lowerCamelCase=16 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="gelu" , _lowerCamelCase=1024 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=0 , _lowerCamelCase=False , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=1 , **_lowerCamelCase , ):
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : List[Any] = d_model
UpperCAmelCase__ : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase__ : Any = encoder_layers
UpperCAmelCase__ : List[str] = encoder_attention_heads
UpperCAmelCase__ : int = decoder_ffn_dim
UpperCAmelCase__ : Any = decoder_layers
UpperCAmelCase__ : Tuple = decoder_attention_heads
UpperCAmelCase__ : Optional[int] = dropout
UpperCAmelCase__ : Dict = attention_dropout
UpperCAmelCase__ : Optional[int] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : str = use_cache
UpperCAmelCase__ : Any = encoder_layers
UpperCAmelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , )
@property
def snake_case__ ( self):
return self.encoder_attention_heads
@property
def snake_case__ ( self):
return self.d_model
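# A hypothetical, self-contained sketch of how an attribute map such as the
# one above can resolve aliases: reads of `num_attention_heads` are
# redirected to `encoder_attention_heads`.
class AliasedConfig:
    attribute_map = {"num_attention_heads": "encoder_attention_heads"}

    def __init__(self, encoder_attention_heads: int = 16):
        self.encoder_attention_heads = encoder_attention_heads

    def __getattr__(self, name):
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

assert AliasedConfig().num_attention_heads == 16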
| 163
| 1
|
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
_UpperCamelCase : Tuple = parse(importlib.metadata.version('torch'))
def snake_case (A_ :Union[str, Version] , A_ :str , A_ :str ):
'''simple docstring'''
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
a : List[Any] = STR_OPERATION_TO_FUNC[operation]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
a : Union[str, Any] = parse(importlib.metadata.version(__lowerCAmelCase ) )
return operation(__lowerCAmelCase , parse(__lowerCAmelCase ) )
def snake_case (A_ :str , A_ :str ):
'''simple docstring'''
return compare_versions(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
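# Usage sketch of the comparison above, assuming the standard `packaging` and
# `operator` semantics (the installed version string below is a stand-in):
from operator import ge
from packaging.version import parse

installed = parse("2.1.0")  # stand-in for parse(importlib.metadata.version("torch"))
assert ge(installed, parse("1.12.0"))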
| 371
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
_UpperCamelCase : Optional[Any] = 'examples/'
_UpperCamelCase : Any = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
_UpperCamelCase : List[str] = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
_UpperCamelCase : List[str] = 'README.md'
def snake_case (A_ :str , A_ :Optional[Any] , A_ :Any ):
'''simple docstring'''
with open(A_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
a : Tuple = f.read()
a, a : Any = REPLACE_PATTERNS[pattern]
a : Dict = replace.replace('VERSION' , A_ )
a : Union[str, Any] = re_pattern.sub(A_ , A_ )
with open(A_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(A_ )
def snake_case (A_ :List[Any] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(A_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(A_ , A_ ) , A_ , pattern='examples' )
def snake_case (A_ :Tuple , A_ :Optional[Any]=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A_ , A_ , A_ )
if not patch:
update_version_in_examples(A_ )
def snake_case ():
'''simple docstring'''
a : str = '🤗 Transformers currently provides the following architectures'
a : Dict = '1. Want to contribute a new model?'
with open(A_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
a : Optional[Any] = f.readlines()
# Find the start of the list.
a : List[str] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
a : Optional[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
a : int = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(A_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(A_ )
def snake_case ():
'''simple docstring'''
with open(REPLACE_FILES['init'] , 'r' ) as f:
a : List[str] = f.read()
a : str = REPLACE_PATTERNS['init'][0].search(A_ ).groups()[0]
return packaging.version.parse(A_ )
def snake_case (A_ :Optional[Any]=False ):
'''simple docstring'''
a : Optional[int] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
a : Tuple = default_version.base_version
elif patch:
a : Union[str, Any] = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
a : Optional[Any] = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
a : Union[str, Any] = input(f'''Which version are you releasing? [{default_version}]''' )
if len(A_ ) == 0:
a : int = default_version
print(f'''Updating version to {version}.''' )
global_version_update(A_ , patch=A_ )
def snake_case ():
'''simple docstring'''
a : str = get_version()
a : Optional[int] = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
a : Optional[int] = current_version.base_version
# Check with the user we got that right.
a : str = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(A_ ) == 0:
a : Union[str, Any] = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(A_ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
_UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
_UpperCamelCase : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
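# A small demonstration (hypothetical file content) of the REPLACE_PATTERNS
# mechanics used by update_version_in_file above: the regex locates the
# current version line and the template's VERSION placeholder is substituted.
import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
template = '__version__ = "VERSION"\n'
code = '__version__ = "0.19.0.dev0"\n'
assert pattern.sub(template.replace("VERSION", "0.19.0"), code) == '__version__ = "0.19.0"\n'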
| 186
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase__ ( lowerCAmelCase):
SCREAMING_SNAKE_CASE__ = '''wavlm'''
def __init__(self , UpperCAmelCase=3_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=1_2_8 , UpperCAmelCase=1_6 , UpperCAmelCase=3_2_0 , UpperCAmelCase=8_0_0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=1_0 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=1_0 , UpperCAmelCase=3_2_0 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_0 , UpperCAmelCase=2_5_6 , UpperCAmelCase=2_5_6 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=2_5_6 , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=5_1_2 , UpperCAmelCase=8_0 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
_lowercase =hidden_size
_lowercase =feat_extract_norm
_lowercase =feat_extract_activation
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =conv_bias
_lowercase =num_buckets
_lowercase =max_bucket_distance
_lowercase =num_conv_pos_embeddings
_lowercase =num_conv_pos_embedding_groups
_lowercase =len(self.conv_dim )
_lowercase =num_hidden_layers
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =num_attention_heads
_lowercase =hidden_dropout
_lowercase =attention_dropout
_lowercase =activation_dropout
_lowercase =feat_proj_dropout
_lowercase =final_dropout
_lowercase =layerdrop
_lowercase =layer_norm_eps
_lowercase =initializer_range
_lowercase =num_ctc_classes
_lowercase =vocab_size
_lowercase =do_stable_layer_norm
_lowercase =use_weighted_layer_sum
_lowercase =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase =apply_spec_augment
_lowercase =mask_time_prob
_lowercase =mask_time_length
_lowercase =mask_time_min_masks
_lowercase =mask_feature_prob
_lowercase =mask_feature_length
# parameters for pretraining with codevector quantized representations
_lowercase =num_codevectors_per_group
_lowercase =num_codevector_groups
_lowercase =contrastive_logits_temperature
_lowercase =num_negatives
_lowercase =codevector_dim
_lowercase =proj_codevector_dim
_lowercase =diversity_loss_weight
# ctc loss
_lowercase =ctc_loss_reduction
_lowercase =ctc_zero_infinity
# adapter
_lowercase =add_adapter
_lowercase =adapter_kernel_size
_lowercase =adapter_stride
_lowercase =num_adapter_layers
_lowercase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowercase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =xvector_output_dim
@property
def __A (self ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
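# A quick check of what the final property above computes: the product of the
# conv strides, i.e. the model's overall waveform-to-features downsampling.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, conv_stride, 1) == 320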
| 5
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
UpperCAmelCase__ = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class lowerCamelCase__ ( lowerCAmelCase):
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=1 ) -> Dict:
_lowercase =tokenizer
_lowercase =dataset
_lowercase =len(UpperCAmelCase ) if n_tasks is None else n_tasks
_lowercase =n_copies
def __iter__(self ) -> Optional[Any]:
_lowercase =[]
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
_lowercase =self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCamelCase__ ( lowerCAmelCase):
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
_lowercase =start_length
_lowercase =eof_strings
_lowercase =tokenizer
def __call__(self , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Dict:
_lowercase =self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_lowercase =[]
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(UpperCAmelCase )
def UpperCAmelCase_ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
_lowercase =re.split('''(%s)''' % '''|'''.join(__snake_case ) , __snake_case )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=20 , **__snake_case ) -> Tuple:
"""simple docstring"""
_lowercase =defaultdict(__snake_case ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__snake_case ) ):
with torch.no_grad():
_lowercase =batch['''ids'''].shape[-1]
_lowercase =accelerator.unwrap_model(__snake_case ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__snake_case , **__snake_case )
# each task is generated batch_size times
_lowercase =batch['''task_id'''].repeat(__snake_case )
_lowercase =accelerator.pad_across_processes(
__snake_case , dim=1 , pad_index=tokenizer.pad_token_id )
_lowercase , _lowercase =accelerator.gather((generated_tokens, generated_tasks) )
_lowercase =generated_tokens.cpu().numpy()
_lowercase =generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__snake_case , __snake_case ):
gen_token_dict[task].append(__snake_case )
_lowercase =[[] for _ in range(__snake_case )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_lowercase =tokenizer.decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )
code_gens[task].append(remove_last_block(__snake_case ) )
return code_gens
def UpperCAmelCase_ ( ) -> str:
"""simple docstring"""
_lowercase =HfArgumentParser(__snake_case )
_lowercase =parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_lowercase =args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_lowercase ='''false'''
if args.num_workers is None:
_lowercase =multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_lowercase =Accelerator()
set_seed(args.seed , device_specific=__snake_case )
# Load model and tokenizer
_lowercase =AutoTokenizer.from_pretrained(args.model_ckpt )
_lowercase =tokenizer.eos_token
_lowercase =AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_lowercase ={
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __snake_case , __snake_case )] ),
}
# Load evaluation dataset and metric
_lowercase =load_dataset('''openai_humaneval''' )
_lowercase =load_metric('''code_eval''' )
_lowercase =args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
_lowercase =args.n_samples // args.batch_size
_lowercase =TokenizedDataset(__snake_case , human_eval['''test'''] , n_copies=__snake_case , n_tasks=__snake_case )
    # do not confuse args.batch_size with the dataloader batch size: it is actually num_return_sequences
_lowercase =DataLoader(__snake_case , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_lowercase =code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
_lowercase , _lowercase =accelerator.prepare(__snake_case , __snake_case )
_lowercase =complete_code(
__snake_case , __snake_case , __snake_case , __snake_case , n_tasks=__snake_case , batch_size=args.batch_size , **__snake_case , )
if accelerator.is_main_process:
_lowercase =[]
for task in tqdm(range(__snake_case ) ):
_lowercase =human_eval['''test'''][task]['''test''']
_lowercase =F"check({human_eval['test'][task]['entry_point']})"
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
_lowercase , _lowercase =code_eval_metric.compute(
references=__snake_case , predictions=__snake_case , num_workers=args.num_workers )
print(F"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__snake_case , __snake_case )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
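# A self-contained illustration of the remove_last_block logic above: the
# generation is split on the end-of-function strings and everything from the
# last stop marker onward is dropped.
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
generation = "    return a + b\nprint(add(1, 2))"
parts = re.split("(%s)" % "|".join(EOF_STRINGS), generation)
assert "".join(parts[:-2]) == "    return a + b"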
| 5
| 1
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase_ :
'''simple docstring'''
@staticmethod
def _A ( *_A , **_A ):
'''simple docstring'''
pass
def __lowercase ( a__ ) -> Tuple:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowerCAmelCase__ : Union[str, Any] =(
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _A ( self , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pipeline(
'document-question-answering' , model=_A , tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE = INVOICE_URL
__SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(_A ) , _A , '' ) ) )
__SCREAMING_SNAKE_CASE = 'What is the placebo?'
__SCREAMING_SNAKE_CASE = [
{
'image': load_image(_A ),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def _A ( self , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = dqa_pipeline(_A , top_k=2 )
self.assertEqual(
_A , [
[
{'score': ANY(_A ), 'answer': ANY(_A ), 'start': ANY(_A ), 'end': ANY(_A )},
{'score': ANY(_A ), 'answer': ANY(_A ), 'start': ANY(_A ), 'end': ANY(_A )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
__SCREAMING_SNAKE_CASE = INVOICE_URL
__SCREAMING_SNAKE_CASE = 'How many cats are there?'
__SCREAMING_SNAKE_CASE = [
{'score': 0.0_0_0_1, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.0_0_0_1, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
__SCREAMING_SNAKE_CASE = dqa_pipeline(image=_A , question=_A , top_k=2 )
self.assertEqual(nested_simplify(_A , decimals=4 ) , _A )
__SCREAMING_SNAKE_CASE = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(_A , decimals=4 ) , _A )
        # No text is detected in this image, meaning layoutlmv2 should fail.
        # Probably an empty answer
__SCREAMING_SNAKE_CASE = './tests/fixtures/tests_samples/COCO/000000039769.png'
__SCREAMING_SNAKE_CASE = dqa_pipeline(image=_A , question=_A , top_k=2 )
self.assertEqual(_A , [] )
        # We can optionally pass the words and bounding boxes directly
__SCREAMING_SNAKE_CASE = './tests/fixtures/tests_samples/COCO/000000039769.png'
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = dqa_pipeline(image=_A , question=_A , words=_A , boxes=_A , top_k=2 )
self.assertEqual(_A , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
__SCREAMING_SNAKE_CASE = INVOICE_URL
__SCREAMING_SNAKE_CASE = 'What is the invoice number?'
__SCREAMING_SNAKE_CASE = dqa_pipeline(image=_A , question=_A , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__SCREAMING_SNAKE_CASE = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__SCREAMING_SNAKE_CASE = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE = INVOICE_URL
__SCREAMING_SNAKE_CASE = 'What is the invoice number?'
__SCREAMING_SNAKE_CASE = dqa_pipeline(image=_A , question=_A , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__SCREAMING_SNAKE_CASE = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__SCREAMING_SNAKE_CASE = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=_A )
__SCREAMING_SNAKE_CASE = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=_A , revision='3dc6de3' , )
__SCREAMING_SNAKE_CASE = INVOICE_URL
__SCREAMING_SNAKE_CASE = 'What is the invoice number?'
__SCREAMING_SNAKE_CASE = dqa_pipeline(image=_A , question=_A , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
__SCREAMING_SNAKE_CASE = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
__SCREAMING_SNAKE_CASE = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(_A ) , _A , '' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=_A )
__SCREAMING_SNAKE_CASE = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=_A , revision='3dc6de3' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE = INVOICE_URL
__SCREAMING_SNAKE_CASE = 'What is the invoice number?'
__SCREAMING_SNAKE_CASE = dqa_pipeline(image=_A , question=_A , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__SCREAMING_SNAKE_CASE = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(_A ) , _A , '' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
@slow
@require_torch
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
__SCREAMING_SNAKE_CASE = INVOICE_URL
__SCREAMING_SNAKE_CASE = 'What is the invoice number?'
__SCREAMING_SNAKE_CASE = dqa_pipeline(image=_A , question=_A , top_k=2 )
self.assertEqual(nested_simplify(_A , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def _A ( self ):
'''simple docstring'''
pass
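# Hedged usage sketch of the pipeline these tests exercise (downloads a model
# and, without word_boxes, needs pytesseract; the image path is hypothetical):
from transformers import pipeline

dqa_pipeline = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
print(dqa_pipeline(image="invoice.png", question="What is the invoice number?", top_k=2))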
| 369
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ : List[str] ={
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] =[
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
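# A minimal sketch (PEP 562 module-level __getattr__, not the actual
# _LazyModule implementation) of the lazy-import pattern above: a submodule
# is only imported when one of its exported names is first accessed.
import importlib

_lazy_names = {"configuration_timesformer": ["TimesformerConfig"]}

def __getattr__(name):
    for module_name, names in _lazy_names.items():
        if name in names:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(name)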
| 118
| 0
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=10 ) -> int:
"""simple docstring"""
A__ = []
for _ in range(lowercase_ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=10 ) -> List[str]:
"""simple docstring"""
A__ = []
for step in range(lowercase_ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = os.path.join(lowercase_ , '''schedule.bin''' )
torch.save(scheduler.state_dict() , lowercase_ )
A__ = torch.load(lowercase_ )
scheduler.load_state_dict(lowercase_ )
return lrs
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]) ->Optional[int]:
'''simple docstring'''
self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__))
for a, b in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assertAlmostEqual(UpperCAmelCase__ , UpperCAmelCase__ , delta=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
'''simple docstring'''
A__ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase__)
A__ = torch.tensor([0.4, 0.2, -0.5])
A__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A__ = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0)
for _ in range(100):
A__ = criterion(UpperCAmelCase__ , UpperCAmelCase__)
loss.backward()
optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2)
def SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
'''simple docstring'''
A__ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase__)
A__ = torch.tensor([0.4, 0.2, -0.5])
A__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A__ = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase__ , weight_decay=0.0 , relative_step=UpperCAmelCase__ , scale_parameter=UpperCAmelCase__ , warmup_init=UpperCAmelCase__ , )
for _ in range(1_000):
A__ = criterion(UpperCAmelCase__ , UpperCAmelCase__)
loss.backward()
optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2)
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = nn.Linear(50 , 50 ) if is_torch_available() else None
UpperCAmelCase__ = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ = 10
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str]=None) ->Any:
'''simple docstring'''
self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__))
for a, b in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assertAlmostEqual(UpperCAmelCase__ , UpperCAmelCase__ , delta=UpperCAmelCase__ , msg=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
'''simple docstring'''
A__ = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
        # scheduler dict format
# function: (sched_args_dict, expected_learning_rates)
A__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
A__ , A__ = data
A__ = scheduler_func(self.optimizer , **UpperCAmelCase__)
self.assertEqual(len([scheduler.get_lr()[0]]) , 1)
A__ = unwrap_schedule(UpperCAmelCase__ , self.num_steps)
self.assertListAlmostEqual(
UpperCAmelCase__ , UpperCAmelCase__ , tol=1e-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , )
A__ = scheduler_func(self.optimizer , **UpperCAmelCase__)
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase__) # wrap to test picklability of the schedule
A__ = unwrap_and_save_reload_schedule(UpperCAmelCase__ , self.num_steps)
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ , msg=f"""failed for {scheduler_func} in save and reload""")
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : int , UpperCAmelCase__ : int) ->Tuple:
'''simple docstring'''
A__ = fn
def __call__( self : Optional[Any] , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Dict) ->List[str]:
'''simple docstring'''
return self.fn(*UpperCAmelCase__ , **UpperCAmelCase__)
@classmethod
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : str) ->str:
'''simple docstring'''
A__ = list(map(self , scheduler.lr_lambdas))
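# A runnable sketch of the schedule-recording pattern tested above, using a
# plain LambdaLR from torch (the toy model and multiplier are hypothetical):
import torch
from torch import nn

model = nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=10.0)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: 0.5**step)
lrs = []
for _ in range(3):
    lrs.append(scheduler.get_last_lr()[0])
    scheduler.step()
assert lrs == [10.0, 5.0, 2.5]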
| 14
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
UpperCamelCase_ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
a_ = "lm_head"
a_ = getattr(UpperCAmelCase , UpperCAmelCase )
if weight_type is not None:
a_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape
else:
a_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
a_ = value
elif weight_type == "weight_g":
a_ = value
elif weight_type == "weight_v":
a_ = value
elif weight_type == "bias":
a_ = value
else:
a_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
a_ = []
a_ = fairseq_model.state_dict()
a_ = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
a_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == "group" , )
a_ = True
else:
for key, mapped_key in MAPPING.items():
a_ = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
a_ = True
if "*" in mapped_key:
a_ = name.split(UpperCAmelCase )[0].split("." )[-2]
a_ = mapped_key.replace("*" , UpperCAmelCase )
if "weight_g" in name:
a_ = "weight_g"
elif "weight_v" in name:
a_ = "weight_v"
elif "bias" in name:
a_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a_ = "weight"
else:
a_ = None
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
continue
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
a_ = full_name.split("conv_layers." )[-1]
a_ = name.split("." )
a_ = int(items[0] )
a_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase )
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """simple docstring"""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
UpperCamelCase_ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
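# For reference: the dotted-key traversal that set_recursively performs above,
# shown in isolation. This is an illustrative sketch, not the helper from this
# script; get_pointer and the toy model below are made up for the example.
import torch
from torch import nn

def get_pointer(root: nn.Module, dotted_key: str) -> nn.Module:
    pointer = root
    for attribute in dotted_key.split("."):
        pointer = getattr(pointer, attribute)  # walk one attribute per key component
    return pointer

toy_model = nn.Sequential(nn.Linear(4, 4))
pointer = get_pointer(toy_model, "0")          # the Linear submodule
pointer.weight.data = torch.zeros(4, 4)        # overwrite its weights in place
print(toy_model[0].weight.abs().sum().item())  # 0.0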
| 243
| 0
|
'''simple docstring'''
test_graph = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    '''simple docstring'''
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record the original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float('''Inf''')
        s = sink
        while s != source:
            # Find the minimum residual capacity along the chosen path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Edges that ended up saturated (capacity fully used) form the cut
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
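# Sanity check (max-flow/min-cut theorem): the capacities of the returned cut
# edges must sum to the maximum flow. A small verification sketch, assuming
# test_graph still holds its original capacities -- run it instead of the
# __main__ block above, since mincut mutates the graph it is given.
from copy import deepcopy

original = deepcopy(test_graph)              # keep the original capacities
cut_edges = mincut(deepcopy(test_graph), source=0, sink=5)
cut_capacity = sum(original[u][v] for u, v in cut_edges)
print(cut_edges)     # [(1, 3), (4, 3), (4, 5)] for this textbook graph
print(cut_capacity)  # 12 + 7 + 4 = 23, the value of the maximum flow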
| 361
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('''sample_euler''')

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='''np''')

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('''sample_euler''')

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='''np''')

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('''sample_dpmpp_2m''')

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type='''np''',
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
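# Outside the test harness, the same pipeline API can be driven directly.
# A minimal sketch, assuming a CUDA device and that the checkpoint referenced
# above can be downloaded from the Hub.
import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
pipe = pipe.to('''cuda''')
pipe.set_scheduler('''sample_dpmpp_2m''')  # any k-diffusion sampler name accepted here

generator = torch.manual_seed(0)           # fixed seed for reproducibility
image = pipe(
    '''A painting of a squirrel eating a burger''',
    generator=generator,
    guidance_scale=7.5,
    num_inference_steps=15,
).images[0]
image.save('''squirrel.png''')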
| 222
| 0
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase : List[str] = parser.parse_args()
    config_parameters_to_change = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
    key_parameters_to_change = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
    subfolder = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''

    with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
        text = reader.read()
        config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
        model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
                config[value] = config[key]
del config[key]
        config['''down_block_types'''] = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
        config['''up_block_types'''] = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
        new_state_dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
            has_changed = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
                    new_state_dict['''.'''.join([new_key] + param_key.split('''.''')[1:])] = param_value
                    has_changed = True
if not has_changed:
                new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
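# The weight-renaming loop above is easier to follow on a toy state dict.
# This sketch mirrors its first-component matching; all keys are made up.
toy_state_dict = {
    '''downsample_blocks.0.weight''': 1,
    '''mid.conv.weight''': 2,
    '''other.bias''': 3,
}
toy_key_map = {'''downsample_blocks''': '''down_blocks''', '''mid''': '''mid_block'''}

renamed = {}
for key, value in toy_state_dict.items():
    head, *rest = key.split('''.''')
    renamed['''.'''.join([toy_key_map.get(head, head), *rest])] = value

print(renamed)
# {'down_blocks.0.weight': 1, 'mid_block.conv.weight': 2, 'other.bias': 3}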
| 106
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
    url = F'''https://www.google.com/search?q={query}&num=100'''
    res = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
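# The AttributeError fallback above handles results wrapped in redirect links,
# where the real target sits in a query parameter. A standalone sketch of that
# extraction; the redirect shape below is made up (Google's markup varies).
from urllib.parse import parse_qs, urlparse

redirect = '''https://www.google.com/url?url=https://example.com/page&sa=U'''
target = parse_qs(urlparse(redirect).query)['''url'''][0]
print(target)  # https://example.com/page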
| 106
| 1
|
'''simple docstring'''
import math
def malus_law( initial_intensity : float , angle : float ):
'''simple docstring'''
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
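# Worked example: Malus's law gives I = I0 * cos^2(theta), so a polarizer
# rotated 60 degrees passes a quarter of the incident intensity.
print(malus_law(100.0, 0))   # 100.0 -- aligned polarizers pass everything
print(malus_law(100.0, 60))  # ~25.0 -- cos^2(60 deg) = 0.25
print(malus_law(100.0, 90))  # ~0.0  -- crossed polarizers block the beam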
| 356
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class a :
"""simple docstring"""
def __init__( self : List[str] , snake_case : Any , snake_case : Tuple=13 , snake_case : Any=10 , snake_case : Any=3 , snake_case : Dict=2 , snake_case : Optional[Any]=2 , snake_case : Union[str, Any]=True , snake_case : Dict=True , snake_case : List[Any]=32 , snake_case : Dict=5 , snake_case : List[str]=4 , snake_case : Dict=37 , snake_case : Any="gelu" , snake_case : Optional[int]=0.1 , snake_case : Union[str, Any]=0.1 , snake_case : Optional[int]=10 , snake_case : Dict=0.02 , snake_case : Tuple="divided_space_time" , snake_case : List[Any]=None , ) -> Optional[int]:
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Tuple = batch_size
__UpperCAmelCase : Optional[Any] = image_size
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Optional[Any] = patch_size
__UpperCAmelCase : List[str] = num_frames
__UpperCAmelCase : Union[str, Any] = is_training
__UpperCAmelCase : str = use_labels
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : Any = attention_type
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : str = scope
__UpperCAmelCase : List[str] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__UpperCAmelCase : str = (image_size // patch_size) ** 2
__UpperCAmelCase : int = (num_frames) * self.num_patches_per_frame + 1
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
__UpperCAmelCase : List[str] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Dict = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__UpperCAmelCase : str = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__UpperCAmelCase : Optional[int] = self.num_labels
return config
def lowerCamelCase__ ( self : Dict , snake_case : Any , snake_case : Optional[int] , snake_case : List[Any] ) -> Optional[Any]:
__UpperCAmelCase : List[Any] = TimesformerModel(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Tuple = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : int , snake_case : Tuple , snake_case : List[Any] , snake_case : Optional[Any] ) -> str:
__UpperCAmelCase : Union[str, Any] = TimesformerForVideoClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case )
# verify the logits shape
__UpperCAmelCase : List[str] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , snake_case )
def lowerCamelCase__ ( self : Any ) -> List[str]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( _a , _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : List[Any] = False
def lowerCamelCase__ ( self : int ) -> str:
__UpperCAmelCase : Tuple = TimesformerModelTester(self )
__UpperCAmelCase : str = ConfigTester(
self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : Dict , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Optional[int]=False ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = copy.deepcopy(snake_case )
if return_labels:
if model_class in get_values(snake_case ):
__UpperCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def lowerCamelCase__ ( self : Any ) -> Dict:
pass
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(snake_case )
__UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : int = [*signature.parameters.keys()]
__UpperCAmelCase : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase__ ( self : Tuple ) -> Dict:
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*snake_case )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = TimesformerModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
if not self.has_attentions:
pass
else:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = True
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = self.model_tester.seq_length
__UpperCAmelCase : int = self.model_tester.num_frames
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Tuple = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCAmelCase : int = model(**self._prepare_for_class(snake_case , snake_case ) )
__UpperCAmelCase : str = outputs.attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : Dict = True
__UpperCAmelCase : str = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
__UpperCAmelCase : List[Any] = outputs.attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__UpperCAmelCase : Tuple = len(snake_case )
# Check attention is always last and order is fine
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Union[str, Any] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Any = model(**self._prepare_for_class(snake_case , snake_case ) )
self.assertEqual(out_len + 1 , len(snake_case ) )
__UpperCAmelCase : Any = outputs.attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
def check_hidden_states_output(snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Tuple ):
__UpperCAmelCase : str = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Any = model(**self._prepare_for_class(snake_case , snake_case ) )
__UpperCAmelCase : int = outputs.hidden_states
__UpperCAmelCase : Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(snake_case ) , snake_case )
__UpperCAmelCase : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : str = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def _a ( ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
__UpperCAmelCase : int = np.load(_lowercase )
return list(_lowercase )
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self : str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
snake_case )
__UpperCAmelCase : str = self.default_image_processor
__UpperCAmelCase : Dict = prepare_video()
__UpperCAmelCase : Union[str, Any] = image_processor(video[:8] , return_tensors='''pt''' ).to(snake_case )
# forward pass
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**snake_case )
# verify the logits
__UpperCAmelCase : Optional[Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , snake_case )
__UpperCAmelCase : List[Any] = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
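# For reference, the integration test above corresponds to this plain
# inference loop. A minimal sketch, assuming Hub access and the
# prepare_video helper defined earlier in this file.
import torch
from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor

processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
model = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''')

inputs = processor(prepare_video()[:8], return_tensors='''pt''')  # 8 frames, as in the test
with torch.no_grad():
    logits = model(**inputs).logits        # shape (1, 400): Kinetics-400 classes
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "eating spaghetti"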
| 240
| 0
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
a : Any = logging.get_logger(__name__)
# General docstring
a : Optional[Any] = '''RegNetConfig'''
# Base docstring
a : str = '''facebook/regnet-y-040'''
a : Optional[Any] = [1, 1088, 7, 7]
# Image classification docstring
a : Tuple = '''facebook/regnet-y-040'''
a : Dict = '''tabby, tabby cat'''
a : int = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = "relu" , ) -> Optional[Any]:
super().__init__()
a : Optional[Any] = nn.Convad(
lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=kernel_size // 2 , groups=lowerCAmelCase__ , bias=lowerCAmelCase__ , )
a : int = nn.BatchNormad(lowerCAmelCase__ )
a : List[str] = ACTaFN[activation] if activation is not None else nn.Identity()
def __a ( self , lowerCAmelCase__ ) -> int:
a : List[str] = self.convolution(lowerCAmelCase__ )
a : Any = self.normalization(lowerCAmelCase__ )
a : Optional[Any] = self.activation(lowerCAmelCase__ )
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__ ) -> int:
super().__init__()
a : Union[str, Any] = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
a : List[str] = config.num_channels
def __a ( self , lowerCAmelCase__ ) -> Tuple:
a : Union[str, Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
a : Any = self.embedder(lowerCAmelCase__ )
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 2 ) -> List[Any]:
super().__init__()
a : List[Any] = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , stride=lowerCAmelCase__ , bias=lowerCAmelCase__ )
a : Union[str, Any] = nn.BatchNormad(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ ) -> Tensor:
a : List[Any] = self.convolution(lowerCAmelCase__ )
a : List[Any] = self.normalization(lowerCAmelCase__ )
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
super().__init__()
a : int = nn.AdaptiveAvgPoolad((1, 1) )
a : Dict = nn.Sequential(
nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , nn.Sigmoid() , )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
# b c h w -> b c 1 1
a : int = self.pooler(lowerCAmelCase__ )
a : Optional[Any] = self.attention(lowerCAmelCase__ )
a : Any = hidden_state * attention
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1 ) -> List[str]:
super().__init__()
a : str = in_channels != out_channels or stride != 1
a : str = max(1 , out_channels // config.groups_width )
a : int = (
RegNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
a : Tuple = nn.Sequential(
RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , groups=lowerCAmelCase__ , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , )
a : List[Any] = ACTaFN[config.hidden_act]
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
a : Any = hidden_state
a : int = self.layer(lowerCAmelCase__ )
a : Union[str, Any] = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
a : Tuple = self.activation(lowerCAmelCase__ )
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1 ) -> str:
super().__init__()
a : Optional[int] = in_channels != out_channels or stride != 1
a : str = max(1 , out_channels // config.groups_width )
a : int = (
RegNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
a : int = nn.Sequential(
RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , groups=lowerCAmelCase__ , activation=config.hidden_act ) , RegNetSELayer(lowerCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , )
a : Optional[Any] = ACTaFN[config.hidden_act]
def __a ( self , lowerCAmelCase__ ) -> Optional[Any]:
a : str = hidden_state
a : int = self.layer(lowerCAmelCase__ )
a : Any = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
a : int = self.activation(lowerCAmelCase__ )
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 2 , ) -> int:
super().__init__()
a : Optional[int] = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
a : Optional[int] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , ) , *[layer(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) for _ in range(depth - 1 )] , )
def __a ( self , lowerCAmelCase__ ) -> Optional[Any]:
a : Optional[Any] = self.layers(lowerCAmelCase__ )
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__ ) -> str:
super().__init__()
a : Any = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
self.stages.append(RegNetStage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , depth=lowerCAmelCase__ ) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = True ) -> BaseModelOutputWithNoAttention:
a : Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
a : Any = hidden_states + (hidden_state,)
a : Any = stage_module(lowerCAmelCase__ )
if output_hidden_states:
a : Dict = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ )
class __UpperCamelCase ( a__ ):
    config_class = RegNetConfig
    base_model_prefix = """regnet"""
    main_input_name = """pixel_values"""
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        if isinstance(module, nn.Convad):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNormad, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
a : str = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
a : Optional[int] = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , a__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ ) -> int:
super().__init__(lowerCAmelCase__ )
a : Tuple = config
a : Optional[int] = RegNetEmbeddings(lowerCAmelCase__ )
a : List[str] = RegNetEncoder(lowerCAmelCase__ )
a : Tuple = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
a : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a : str = return_dict if return_dict is not None else self.config.use_return_dict
a : Tuple = self.embedder(lowerCAmelCase__ )
a : Any = self.encoder(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
a : Any = encoder_outputs[0]
a : Union[str, Any] = self.pooler(lowerCAmelCase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , a__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ ) -> str:
super().__init__(lowerCAmelCase__ )
a : List[str] = config.num_labels
a : Dict = RegNetModel(lowerCAmelCase__ )
# classification head
a : Optional[Any] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __a ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> ImageClassifierOutputWithNoAttention:
a : Dict = return_dict if return_dict is not None else self.config.use_return_dict
a : List[Any] = self.regnet(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
a : Any = outputs.pooler_output if return_dict else outputs[1]
a : List[str] = self.classifier(lowerCAmelCase__ )
a : str = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
a : Optional[int] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
a : Optional[Any] = "single_label_classification"
else:
a : str = "multi_label_classification"
if self.config.problem_type == "regression":
a : Optional[Any] = MSELoss()
if self.num_labels == 1:
a : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
a : str = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
a : Union[str, Any] = CrossEntropyLoss()
a : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
a : Dict = BCEWithLogitsLoss()
a : List[Any] = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
a : int = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states )
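# The squeeze-and-excitation block used by RegNetSELayer above (global pool,
# two 1x1 convolutions, sigmoid gate, channel-wise multiply), shown in
# isolation with standard PyTorch names -- a sketch, independent of this file.
import torch
from torch import nn

class SqueezeExcite(nn.Module):
    def __init__(self, channels: int, reduced: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(channels, reduced, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced, channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        attention = self.attention(self.pooler(hidden_state))  # (b, c, 1, 1)
        return hidden_state * attention                        # broadcast gate

x = torch.randn(2, 32, 8, 8)
print(SqueezeExcite(32, 8)(x).shape)  # torch.Size([2, 32, 8, 8])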
| 105
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits(self, batch_size, length):
        '''simple docstring'''
        scores = jnp.ones((batch_size, length)) / length
return scores
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : Union[str, Any] = None
A_ : Any = 20
A_ : Any = self._get_uniform_logits(batch_size=2 , length=_SCREAMING_SNAKE_CASE )
# tweak scores to not be uniform anymore
A_ : Dict = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
A_ : Tuple = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
A_ : List[str] = jax.nn.softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
A_ : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
A_ : Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 )
A_ : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
A_ : List[Any] = jax.nn.softmax(temp_dist_warper_smoother(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Any = None
A_ : List[Any] = 10
A_ : str = 2
# create ramp distribution
A_ : Any = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy()
A_ : List[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
A_ : Any = FlaxTopKLogitsWarper(3 )
A_ : Tuple = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
A_ : Optional[int] = 5
A_ : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
A_ : Optional[Any] = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, length) ).copy()
A_ : Dict = top_k_warp_safety_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : str = None
A_ : Optional[Any] = 10
A_ : Any = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
A_ : Optional[int] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
A_ : str = FlaxTopPLogitsWarper(0.8 )
A_ : Optional[int] = np.exp(top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
A_ : Tuple = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# check edge cases with negative and extreme logits
A_ : Union[str, Any] = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
A_ : str = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
A_ : str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
A_ : str = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : str = 20
A_ : Union[str, Any] = 4
A_ : Optional[Any] = 0
A_ : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that min length is applied at length 5
A_ : int = ids_tensor((batch_size, 20) , vocab_size=20 )
A_ : List[Any] = 5
A_ : Optional[int] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Optional[int] = min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
A_ : Tuple = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Any = 15
A_ : int = min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Optional[int] = 20
A_ : Optional[int] = 4
A_ : Optional[int] = 0
A_ : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the bos_token_id score
A_ : Optional[Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
A_ : str = 1
A_ : List[str] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : List[str] = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
A_ : Optional[int] = 3
A_ : List[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Tuple = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : Union[str, Any] = 20
A_ : str = 4
A_ : Dict = 0
A_ : Optional[int] = 5
A_ : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the eos_token_id when max_length is reached
A_ : List[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
A_ : Any = 4
A_ : Optional[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : List[Any] = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
A_ : int = 3
A_ : Union[str, Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Dict = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self )->str:
'''simple docstring'''
A_ : str = 4
A_ : Dict = 10
A_ : Union[str, Any] = 15
A_ : str = 2
A_ : int = 1
A_ : List[str] = 15
# dummy input_ids and scores
A_ : Tuple = ids_tensor((batch_size, sequence_length) , _SCREAMING_SNAKE_CASE )
A_ : int = input_ids.copy()
A_ : List[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = scores.copy()
# instantiate all dist processors
A_ : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
A_ : Any = FlaxTopKLogitsWarper(3 )
A_ : List[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
A_ : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = 10
# no processor list
A_ : int = temp_dist_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : List[str] = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Any = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Dict = min_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = bos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = eos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# with processor list
A_ : Any = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
A_ : List[str] = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _snake_case ( self )->Dict:
'''simple docstring'''
A_ : str = 4
A_ : Dict = 10
A_ : Tuple = 15
A_ : List[str] = 2
A_ : List[str] = 1
A_ : Union[str, Any] = 15
# dummy input_ids and scores
A_ : Any = ids_tensor((batch_size, sequence_length) , _SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = input_ids.copy()
A_ : Optional[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Tuple = scores.copy()
# instantiate all dist processors
A_ : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
A_ : Optional[Any] = FlaxTopKLogitsWarper(3 )
A_ : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
A_ : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Dict = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
A_ : str = 10
# no processor list
def run_no_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : int = temp_dist_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Dict = min_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Any = bos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = eos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
return scores
# with processor list
def run_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
A_ : Optional[int] = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
return scores
A_ : Optional[int] = jax.jit(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = jax.jit(_SCREAMING_SNAKE_CASE )
A_ : Dict = jitted_run_no_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : List[Any] = jitted_run_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
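# The pattern the last two tests exercise -- chaining warpers in a
# FlaxLogitsProcessorList and jitting the result -- in minimal form.
# Shapes and values below are toy inputs, not the test fixtures.
import jax
import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

processor = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.5), FlaxTopKLogitsWarper(3)]
)

input_ids = jnp.zeros((2, 4), dtype='''i4''')  # dummy prompt ids
scores = jnp.ones((2, 10)) / 10                # uniform logits over 10 tokens
warped = jax.jit(lambda ids, s: processor(ids, s, cur_len=4))(input_ids, scores)
print(jnp.isinf(warped).sum(axis=-1))          # [7 7]: top-k keeps 3 tokens per row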
| 186
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 48
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {"""vocab_file""": """spiece.model"""}
lowerCamelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
lowerCamelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
lowerCamelCase = 0
lowerCamelCase = 1
lowerCamelCase = 2
lowerCamelCase = 3
lowerCamelCase = 4
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = """left"""
def __init__( self : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Any=False , _lowerCAmelCase : Any="<s>" , _lowerCAmelCase : Union[str, Any]="</s>" , _lowerCAmelCase : int="<unk>" , _lowerCAmelCase : Union[str, Any]="<sep>" , _lowerCAmelCase : Union[str, Any]="<pad>" , _lowerCAmelCase : Union[str, Any]="<cls>" , _lowerCAmelCase : List[Any]="<mask>" , _lowerCAmelCase : List[Any]=["<eop>", "<eod>"] , _lowerCAmelCase : Optional[Dict[str, Any]] = None , **_lowerCAmelCase : str , ):
'''simple docstring'''
__lowercase =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase) if isinstance(_lowerCAmelCase , _lowerCAmelCase) else mask_token
__lowercase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
__lowercase =3
__lowercase =do_lower_case
__lowercase =remove_space
__lowercase =keep_accents
__lowercase =vocab_file
__lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowerCAmelCase)
@property
def __lowerCamelCase ( self : str):
'''simple docstring'''
return len(self.sp_model)
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase ={self.convert_ids_to_tokens(_lowerCAmelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : str):
'''simple docstring'''
__lowercase =self.__dict__.copy()
__lowercase =None
return state
def __setstate__( self : List[Any] , _lowerCAmelCase : List[str]):
'''simple docstring'''
__lowercase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
__lowercase ={}
__lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
    def preprocess_text( self , inputs ):
        '''simple docstring'''
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``' , '"').replace('\'\'' , '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD' , outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ):
        '''simple docstring'''
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text , out_type=str)
        new_pieces = []
        for piece in pieces:
            # re-tokenize pieces like "9," so the trailing comma becomes its own piece
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE , ' ').strip()
        return out_string
    def _decode( self , token_ids: List[int] , skip_special_tokens: bool = False , clean_up_tokenization_spaces: bool = None , spaces_between_special_tokens: bool = True , **kwargs , ):
        '''simple docstring'''
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer' , False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = ''.join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
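    # Note the XLNet-style layout: unlike BERT, the classifier token goes at the END, so a single
    # sequence becomes [A..., <sep>, <cls>] and a pair becomes [A..., <sep>, B..., <sep>, <cls>].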
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 48
| 1
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, 'r', encoding='utf-8') as f:
    bleu_data = json.load(f)
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def get_tokenizer( self , mname ):
        return FSMTTokenizer.from_pretrained(mname )
    def get_model( self , mname ):
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ):
        # note: this test is not testing the best performance since it only evaluates a small batch,
        # but it should be enough to detect a regression in the output quality
        mname = F"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences , return_tensors='pt' , truncation=True , padding='longest' ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores['bleu'] , min_bleu_score )
| 225
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig ):
    """simple docstring"""
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 1_0 << 2_0  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info( self ):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table( self , pa_table: pa.Table ) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    dataset = json.load(f )
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple) ):
                    keys = set().union(*[row.keys() for row in dataset] )
                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping )
                yield file_idx, self._cast_table(pa_table )
            # If the file has one json object per line
            else:
                with open(file , "rb" ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10 )
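                    # For the default 10 MiB chunksize this works out to
                    # max(10_485_760 // 32, 16_384) = 327_680 bytes, i.e. pyarrow splits each
                    # chunk into roughly 32 blocks that it can parse on separate threads.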
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode("utf-8" )
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F'''Batch of {len(batch )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f )
                            except json.JSONDecodeError:
                                logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list ):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset] )
                                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                                    raise ValueError(F'''Not able to read records in the JSON file at {file}.''' ) from None
                                yield file_idx, self._cast_table(pa_table )
                                break
                            else:
                                logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                                raise ValueError(
                                    F'''Not able to read records in the JSON file at {file}. '''
                                    F'''You should probably indicate the field of the JSON file containing your records. '''
                                    F'''This JSON file contains the following fields: {str(list(dataset.keys() ) )}. '''
                                    F'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table )
                        batch_idx += 1
| 118
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class FocalNetConfig(BackboneConfigMixin , PretrainedConfig ):
    model_type = "focalnet"
    def __init__( self , image_size=2_24 , patch_size=4 , num_channels=3 , embed_dim=96 , use_conv_embed=False , hidden_sizes=[1_92, 3_84, 7_68, 7_68] , depths=[2, 2, 6, 2] , focal_levels=[2, 2, 2, 2] , focal_windows=[3, 3, 3, 3] , hidden_act="gelu" , mlp_ratio=4.0 , hidden_dropout_prob=0.0 , drop_path_rate=0.1 , use_layerscale=False , layerscale_value=1E-4 , use_post_layernorm=False , use_post_layernorm_in_modulation=False , normalize_modulator=False , initializer_range=0.0_2 , layer_norm_eps=1E-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
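        # e.g. with the default depths [2, 2, 6, 2] this yields stage_names
        # ['stem', 'stage1', 'stage2', 'stage3', 'stage4'], against which the requested
        # out_features/out_indices are validated and aligned.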
| 359
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(model ):
    if is_torch_version("<" , "2.0.0" ) or not hasattr(torch , "_dynamo" ):
        return False
    return isinstance(model , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel(model , keep_fp32_wrapper = True ):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model , "forward" )
        original_forward = model.__dict__.pop("_original_forward" , None )
        if original_forward is not None:
            while hasattr(forward , "__wrapped__" ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model , "_converted_to_transformer_engine" , False ):
        convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
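# e.g. extract_model_from_parallel(DistributedDataParallel(net)) hands back the bare `net`,
# and a torch.compile'd module is first swapped for its ._orig_mod before unwrapping.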
def wait_for_everyone():
    PartialState().wait_for_everyone()
def save(obj , f ):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment(**kwargs ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj ):
    if not hasattr(obj , "__qualname__" ) and not hasattr(obj , "__name__" ):
        obj = getattr(obj , "__class__" , obj )
    if hasattr(obj , "__qualname__" ):
        return obj.__qualname__
    if hasattr(obj , "__name__" ):
        return obj.__name__
    return str(obj )
def merge_dicts(source , destination ):
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
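# e.g. merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}}) -> {"a": {"x": 1, "y": 2}}:
# nested dicts are merged recursively, while scalar values from `source` overwrite `destination`.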
def is_port_in_use(port = None ):
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(("localhost", port) ) == 0
| 322
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester :
    def __init__( self , parent , batch_size=1_3 , image_size=6_4 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , backbone_featmap_shape=[1, 1_6, 4, 4] , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 3_2) ** 2
        self.seq_length = num_patches + 1
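        # e.g. with the default image_size=64 the backbone feature map is 2x2, so
        # num_patches = (64 // 32) ** 2 = 4 and seq_length = 5 once the [CLS] token is added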
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        backbone_config = {
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [4, 8, 1_6, 3_2],
            '''num_groups''': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ViTHybridModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_initialization( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.90_90, -0.49_93, -0.23_89] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
    @slow
    @require_accelerate
    def test_accelerate_inference( self ):
        """simple docstring"""
        image_processor = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
        model = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , '''tabby, tabby cat''' )
| 72
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 222
| 0
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken('<mask>' ,lstrip=True ,rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token} )
        tokenizer.add_tokens(['<ctc_blank>'] )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self ,tokenizer ):
        '''simple docstring'''
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text
    def get_clean_sequence( self ,tokenizer ,with_prefix_space=False ,max_length=20 ,min_length=5 ):
        '''simple docstring'''
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text ,add_special_tokens=False )
        text = tokenizer.decode(ids ,clean_up_tokenization_spaces=False )
        return text, ids
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,'<s>' )
        self.assertEqual(vocab_keys[1] ,'<pad>' )
        self.assertEqual(vocab_keys[-4] ,'œ' )
        self.assertEqual(vocab_keys[-2] ,'<mask>' )
        self.assertEqual(vocab_keys[-1] ,'<ctc_blank>' )
        self.assertEqual(len(vocab_keys ) ,81 )
    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size ,79 )
    def test_add_tokens_tokenizer( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}' ):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer )
                self.assertNotEqual(vocab_size ,0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd']
                added_toks = tokenizer.add_tokens(new_toks )
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer )
                self.assertNotEqual(vocab_size_2 ,0 )
                self.assertEqual(vocab_size ,vocab_size_2 )
                self.assertEqual(added_toks ,len(new_toks ) )
                self.assertEqual(all_size_2 ,all_size + len(new_toks ) )
                tokens = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' ,add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) ,4 )
                self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
                new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2 )
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer )
                self.assertNotEqual(vocab_size_3 ,0 )
                self.assertEqual(vocab_size ,vocab_size_3 )
                self.assertEqual(added_toks_2 ,len(new_toks_2 ) )
                self.assertEqual(all_size_3 ,all_size_2 + len(new_toks_2 ) )
                tokens = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' ,add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) ,6 )
                self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] ,tokens[1] )
                self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] ,tokens[-4] )
                self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
    def test_pickle_subword_regularization_tokenizer( self ):
        '''simple docstring'''
        pass
    def test_subword_regularization_tokenizer( self ):
        '''simple docstring'''
        pass
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('This is a test' )
        # fmt: off
        self.assertListEqual(tokens ,[SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens ,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        # fmt: off
        self.assertListEqual(ids ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens ,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
    @slow
    def test_tokenizer_integration( self ):
        '''simple docstring'''
        # Use custom sequences because this tokenizer does not handle numbers.
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
        # fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding ,model_name='microsoft/speecht5_asr' ,revision='c5ef64c71905caeccde0e4462ef3f9077224c524' ,sequences=sequences ,)
| 236
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mvp'] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 236
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester( unittest.TestCase):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_5_5 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                width, height = image.size
            else:
                height, width = image.shape[1], image.shape[2]
            if width < height:
                expected_height = int(self.size["shortest_edge"] * height / width )
                expected_width = self.size["shortest_edge"]
            elif width > height:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * width / height )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
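    # e.g. a 30x40 (w x h) input with shortest_edge=18 is expected to come out as width 18,
    # height 24: the short side is pinned to 18 and the long side scales by the same 18/30 factor.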
@require_torch
@require_vision
class YolosImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase):
    '''simple docstring'''
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = YolosImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_equivalence_padding( self ):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict )
        image_processing_2 = self.image_processing_class(do_resize=False , do_normalize=False , do_rescale=False )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs , return_tensors="pt" )
        encoded_images = image_processing_2(image_inputs , return_tensors="pt" )
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1E-4 ) )
    @slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"image_id": 3_9_7_6_9, "annotations": target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small" )
        encoding = image_processing(images=image , annotations=target , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1E-4 ) )
        # verify area
        expected_area = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_shape )
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_slice , atol=1E-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
@slow
def _lowerCamelCase ( self :str ) -> Tuple:
# prepare image, target and masks_path
__UpperCamelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__UpperCamelCase : Optional[Any] = json.loads(f.read() )
__UpperCamelCase : List[str] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
__UpperCamelCase : str = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__UpperCamelCase : Tuple = YolosImageProcessor(format="coco_panoptic" )
__UpperCamelCase : List[Any] = image_processing(images=__snake_case , annotations=__snake_case , masks_path=__snake_case , return_tensors="pt" )
# verify pixel values
__UpperCamelCase : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __snake_case )
__UpperCamelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __snake_case , atol=1E-4 ) )
# verify area
__UpperCamelCase : List[str] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __snake_case ) )
# verify boxes
__UpperCamelCase : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __snake_case )
__UpperCamelCase : List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __snake_case , atol=1E-3 ) )
# verify image_id
__UpperCamelCase : Tuple = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __snake_case ) )
# verify is_crowd
__UpperCamelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __snake_case ) )
# verify class_labels
__UpperCamelCase : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __snake_case ) )
# verify masks
__UpperCamelCase : Optional[int] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __snake_case )
# verify orig_size
__UpperCamelCase : List[Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __snake_case ) )
# verify size
__UpperCamelCase : str = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __snake_case ) )
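# Sketch of the annotation payloads the two slow tests above pass to the
# processor (keys taken from the tests; the list contents are elided):
#   detection: {"image_id": 39769, "annotations": [...]}
#   panoptic:  {"file_name": "000000039769.png", "image_id": 39769, "segments_info": [...]}
#              plus masks_path pointing at the coco_panoptic fixture directory.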
| 232
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __lowercase ( ):
a__ = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
a__ = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(__lowerCAmelCase )
# Let's go
a__ = parser.parse_args()
if not hasattr(__lowerCAmelCase , 'func' ):
parser.print_help()
exit(1 )
# Run
a__ = args.func(__lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
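# Usage sketch: with only EnvironmentCommand registered above, the CLI exposes
# a single subcommand (name assumed from the command class):
#   $ diffusers-cli env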
| 240
| 0
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCamelCase ( lowerCamelCase__ : int ):
'''simple docstring'''
random.seed(lowerCamelCase__ )
np.random.seed(lowerCamelCase__ )
torch.manual_seed(lowerCamelCase__ )
torch.cuda.manual_seed_all(lowerCamelCase__ )
# ^^ safe to call this function even if cuda is not available
class __lowercase :
"""simple docstring"""
def __init__( self , A , A = 0.9999 , A = 0.0 , A = 0 , A = False , A = 1.0 , A = 2 / 3 , A = None , A = None , **A , ) -> List[str]:
'''simple docstring'''
if isinstance(A , torch.nn.Module ):
lowerCamelCase = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , A , standard_warn=A , )
lowerCamelCase = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
lowerCamelCase = True
if kwargs.get("""max_value""" , A ) is not None:
lowerCamelCase = """The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , A , standard_warn=A )
lowerCamelCase = kwargs["""max_value"""]
if kwargs.get("""min_value""" , A ) is not None:
lowerCamelCase = """The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , A , standard_warn=A )
lowerCamelCase = kwargs["""min_value"""]
lowerCamelCase = list(A )
lowerCamelCase = [p.clone().detach() for p in parameters]
if kwargs.get("""device""" , A ) is not None:
lowerCamelCase = """The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , A , standard_warn=A )
self.to(device=kwargs["""device"""] )
lowerCamelCase = None
lowerCamelCase = decay
lowerCamelCase = min_decay
lowerCamelCase = update_after_step
lowerCamelCase = use_ema_warmup
lowerCamelCase = inv_gamma
lowerCamelCase = power
lowerCamelCase = 0
lowerCamelCase = None # set in `step()`
lowerCamelCase = model_cls
lowerCamelCase = model_config
@classmethod
def __A ( cls , A , A ) -> "EMAModel":
'''simple docstring'''
lowerCamelCase , lowerCamelCase = model_cls.load_config(A , return_unused_kwargs=A )
lowerCamelCase = model_cls.from_pretrained(A )
lowerCamelCase = cls(model.parameters() , model_cls=A , model_config=model.config )
ema_model.load_state_dict(A )
return ema_model
def __A ( self , A ) -> int:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
lowerCamelCase = self.model_cls.from_config(self.model_config )
lowerCamelCase = self.state_dict()
state_dict.pop("""shadow_params""" , A )
model.register_to_config(**A )
self.copy_to(model.parameters() )
model.save_pretrained(A )
def __A ( self , A ) -> float:
'''simple docstring'''
lowerCamelCase = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
lowerCamelCase = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
lowerCamelCase = (1 + step) / (10 + step)
lowerCamelCase = min(A , self.decay )
# make sure decay is not smaller than min_decay
lowerCamelCase = max(A , self.min_decay )
return cur_decay_value
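# Example values from the warmup branch above (inv_gamma=1, power=2/3):
#   step=1   -> 1 - 2**(-2/3)   ~= 0.370
#   step=100 -> 1 - 101**(-2/3) ~= 0.954, then clamped into [min_decay, decay]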
@torch.no_grad()
def __A ( self , A ) -> Optional[int]:
'''simple docstring'''
if isinstance(A , torch.nn.Module ):
lowerCamelCase = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , A , standard_warn=A , )
lowerCamelCase = parameters.parameters()
lowerCamelCase = list(A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
lowerCamelCase = self.get_decay(self.optimization_step )
lowerCamelCase = decay
lowerCamelCase = 1 - decay
lowerCamelCase = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
lowerCamelCase = deepspeed.zero.GatheredParameters(A , modifier_rank=A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(A )
def __A ( self , A ) -> None:
'''simple docstring'''
lowerCamelCase = list(A )
for s_param, param in zip(self.shadow_params , A ):
param.data.copy_(s_param.to(param.device ).data )
def __A ( self , A=None , A=None ) -> None:
'''simple docstring'''
lowerCamelCase = [
p.to(device=A , dtype=A ) if p.is_floating_point() else p.to(device=A )
for p in self.shadow_params
]
def __A ( self ) -> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __A ( self , A ) -> None:
'''simple docstring'''
lowerCamelCase = [param.detach().cpu().clone() for param in parameters]
def __A ( self , A ) -> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , A ):
param.data.copy_(c_param.data )
# Better memory-wise.
lowerCamelCase = None
def __A ( self , A ) -> None:
'''simple docstring'''
lowerCamelCase = copy.deepcopy(A )
lowerCamelCase = state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
lowerCamelCase = state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , A ):
raise ValueError("""Invalid min_decay""" )
lowerCamelCase = state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , A ):
raise ValueError("""Invalid optimization_step""" )
lowerCamelCase = state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , A ):
raise ValueError("""Invalid update_after_step""" )
lowerCamelCase = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , A ):
raise ValueError("""Invalid use_ema_warmup""" )
lowerCamelCase = state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
lowerCamelCase = state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
lowerCamelCase = state_dict.get("""shadow_params""" , A )
if shadow_params is not None:
lowerCamelCase = shadow_params
if not isinstance(self.shadow_params , A ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(A , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 351
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Union[str, Any] = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
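# Note: the _LazyModule above defers the heavy torch/tf imports until an
# attribute such as MobileBertModel is first accessed; the eager imports in the
# TYPE_CHECKING branch exist only for static type checkers.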
| 66
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE__ : Tuple = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
SCREAMING_SNAKE_CASE__ : Dict = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
SCREAMING_SNAKE_CASE__ : int = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : Dict = ConvBertTokenizer
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__="[UNK]" , UpperCamelCase__="[SEP]" , UpperCamelCase__="[PAD]" , UpperCamelCase__="[CLS]" , UpperCamelCase__="[MASK]" , UpperCamelCase__=True , UpperCamelCase__=None , **UpperCamelCase__ , ) -> List[str]:
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenize_chinese_chars=UpperCamelCase__ , strip_accents=UpperCamelCase__ , **UpperCamelCase__ , )
lowerCamelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCamelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCamelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCamelCase__ ) != tokenize_chinese_chars
):
lowerCamelCase : Dict = getattr(UpperCamelCase__ , normalizer_state.pop("type" ) )
lowerCamelCase : Any = do_lower_case
lowerCamelCase : Optional[Any] = strip_accents
lowerCamelCase : Optional[int] = tokenize_chinese_chars
lowerCamelCase : Optional[int] = normalizer_class(**UpperCamelCase__ )
lowerCamelCase : int = do_lower_case
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> int:
lowerCamelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : Any = [self.sep_token_id]
lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
lowerCamelCase : Optional[Any] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
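# Usage sketch for the fast tokenizer above (checkpoint name taken from the
# pretrained-vocab map; assumes the class resolves to ConvBertTokenizerFast):
tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
enc = tok("lower newer", return_token_type_ids=True)  # input_ids, token_type_ids, attention_mask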
| 48
|
from __future__ import annotations
import requests
def A ( _SCREAMING_SNAKE_CASE ) -> dict:
lowerCamelCase : Tuple = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(_SCREAMING_SNAKE_CASE ).json()
def A ( _SCREAMING_SNAKE_CASE = 10 ) -> list[dict]:
lowerCamelCase : str = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
lowerCamelCase : Any = requests.get(_SCREAMING_SNAKE_CASE ).json()[:max_stories]
return [get_hackernews_story(_SCREAMING_SNAKE_CASE ) for story_id in story_ids]
def A ( _SCREAMING_SNAKE_CASE = 10 ) -> str:
lowerCamelCase : str = hackernews_top_stories(_SCREAMING_SNAKE_CASE )
return "\n".join("* [{title}]({url})".format(**_SCREAMING_SNAKE_CASE ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 48
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _UpperCAmelCase ( lowercase_ ):
'''simple docstring'''
__A = "decision_transformer"
__A = ["past_key_values"]
__A = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[int] , lowercase_ : Union[str, Any]=17 , lowercase_ : List[Any]=4 , lowercase_ : Tuple=128 , lowercase_ : Dict=4096 , lowercase_ : Optional[int]=True , lowercase_ : List[str]=1 , lowercase_ : List[str]=1024 , lowercase_ : Optional[Any]=3 , lowercase_ : Dict=1 , lowercase_ : Dict=None , lowercase_ : Union[str, Any]="relu" , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Union[str, Any]=1e-5 , lowercase_ : Optional[int]=0.02 , lowercase_ : Optional[Any]=True , lowercase_ : Tuple=True , lowercase_ : str=50256 , lowercase_ : List[Any]=50256 , lowercase_ : List[str]=False , lowercase_ : Tuple=False , **lowercase_ : Dict , ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = state_dim
_UpperCamelCase = act_dim
_UpperCamelCase = hidden_size
_UpperCamelCase = max_ep_len
_UpperCamelCase = action_tanh
_UpperCamelCase = vocab_size
_UpperCamelCase = n_positions
_UpperCamelCase = n_layer
_UpperCamelCase = n_head
_UpperCamelCase = n_inner
_UpperCamelCase = activation_function
_UpperCamelCase = resid_pdrop
_UpperCamelCase = embd_pdrop
_UpperCamelCase = attn_pdrop
_UpperCamelCase = layer_norm_epsilon
_UpperCamelCase = initializer_range
_UpperCamelCase = scale_attn_weights
_UpperCamelCase = use_cache
_UpperCamelCase = scale_attn_by_inverse_layer_idx
_UpperCamelCase = reorder_and_upcast_attn
_UpperCamelCase = bos_token_id
_UpperCamelCase = eos_token_id
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
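# Illustrative instantiation of the config above (class name assumed to be
# DecisionTransformerConfig, matching model_type = "decision_transformer"):
config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)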
| 353
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Any=13 , lowercase_ : Optional[int]=7 , lowercase_ : Optional[Any]=True , lowercase_ : str=True , lowercase_ : Tuple=True , lowercase_ : List[Any]=True , lowercase_ : str=99 , lowercase_ : Any=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : List[Any]=4 , lowercase_ : List[str]=37 , lowercase_ : List[Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : int=16 , lowercase_ : str=2 , lowercase_ : Tuple=0.02 , lowercase_ : Dict=4 , ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_attention_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_choices
def __UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_attention_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = True
__A = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
_UpperCamelCase = FlaxRoFormerModelTester(self)
@slow
def __UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=lowercase_)
_UpperCamelCase = model(np.ones((1, 1)))
self.assertIsNotNone(lowercase_)
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
_UpperCamelCase = jnp.array([[0, 1, 2, 3, 4, 5]])
_UpperCamelCase = model(lowercase_)[0]
_UpperCamelCase = 50000
_UpperCamelCase = (1, 6, vocab_size)
self.assertEqual(output.shape , lowercase_)
_UpperCamelCase = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]])
self.assertTrue(jnp.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4))
| 63
| 0
|
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[str] = FunnelTokenizer
__UpperCAmelCase : List[Any] = FunnelTokenizerFast
__UpperCAmelCase : Any = True
__UpperCAmelCase : List[str] = True
def __UpperCAmelCase ( self ):
super().setUp()
__a = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __UpperCAmelCase ( self , **_a ):
return FunnelTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCAmelCase ( self , **_a ):
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __UpperCAmelCase ( self , _a ):
__a = '''UNwant\u00E9d,running'''
__a = '''unwanted, running'''
return input_text, output_text
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class(self.vocab_file )
__a = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 10, 8, 9] )
def __UpperCAmelCase ( self ):
__a = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
__a = tokenizer('''UNwant\u00E9d,running''' )
__a = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
__a = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
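# Per the assertions above, Funnel gives token type 2 to the leading [CLS]:
# a single sequence yields [2] + [0] * n; a pair appends [1] * m for the second.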
| 45
|
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Union[str, Any] = int(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: List[str] = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=3_00 ) -> int:
"""simple docstring"""
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: List[str] = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__lowerCAmelCase: List[Any] = f'''{elt:.6f}''' if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else str(SCREAMING_SNAKE_CASE )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
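# Sanity examples for the two helpers above (their defs are obfuscated to `_a`;
# illustrative names used here):
#   format_time(3725) -> "1:02:05"   # hours shown only when nonzero
#   format_time(125)  -> "02:05"
#   text_to_html_table([["step", "loss"], [1, 0.123456]]) -> an HTML <table> string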
class A_ :
_lowercase : str = 5
_lowercase : str = 0.2
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional["NotebookTrainingTracker"] = None , UpperCAmelCase : int = 3_0_0 , ) -> List[Any]:
__lowerCAmelCase: List[str] = total
__lowerCAmelCase: Optional[int] = '' if prefix is None else prefix
__lowerCAmelCase: int = leave
__lowerCAmelCase: List[str] = parent
__lowerCAmelCase: Optional[Any] = width
__lowerCAmelCase: List[str] = None
__lowerCAmelCase: Dict = None
__lowerCAmelCase: List[str] = None
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : bool = False , UpperCAmelCase : str = None ) -> Optional[int]:
__lowerCAmelCase: int = value
if comment is not None:
__lowerCAmelCase: Any = comment
if self.last_value is None:
__lowerCAmelCase: List[Any] = time.time()
__lowerCAmelCase: Any = value
__lowerCAmelCase: List[str] = None
__lowerCAmelCase: Dict = self.warmup
__lowerCAmelCase: List[str] = 1
self.update_bar(UpperCAmelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__lowerCAmelCase: Union[str, Any] = time.time()
__lowerCAmelCase: str = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__lowerCAmelCase: Dict = self.elapsed_time / (value - self.start_value)
else:
__lowerCAmelCase: int = None
if value >= self.total:
__lowerCAmelCase: Any = self.total
__lowerCAmelCase: str = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__lowerCAmelCase: List[str] = self.average_time_per_item * (self.total - value)
self.update_bar(UpperCAmelCase )
__lowerCAmelCase: Tuple = value
__lowerCAmelCase: int = current_time
if self.average_time_per_item is None:
__lowerCAmelCase: Optional[int] = 1
else:
__lowerCAmelCase: Optional[Any] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def UpperCAmelCase ( self : int , UpperCAmelCase : Any , UpperCAmelCase : List[Any]=None ) -> Union[str, Any]:
__lowerCAmelCase: int = ' ' * (len(str(self.total ) ) - len(str(UpperCAmelCase ) )) + str(UpperCAmelCase )
if self.elapsed_time is None:
__lowerCAmelCase: Dict = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__lowerCAmelCase: str = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__lowerCAmelCase: Any = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
__lowerCAmelCase: Any = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__lowerCAmelCase: Tuple = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self : str ) -> Optional[Any]:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class A_ ( snake_case__ ):
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any]=None ) -> Any:
super().__init__(UpperCAmelCase )
__lowerCAmelCase: Tuple = None if column_names is None else [column_names]
__lowerCAmelCase: Union[str, Any] = None
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase: str = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__lowerCAmelCase: Optional[Any] = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : List[Any] ) -> Dict:
if self.inner_table is None:
__lowerCAmelCase: List[str] = [list(values.keys() ), list(values.values() )]
else:
__lowerCAmelCase: Any = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(UpperCAmelCase )
__lowerCAmelCase: List[Any] = columns
self.inner_table.append([values[c] for c in columns] )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[str]=3_0_0 ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = NotebookProgressBar(UpperCAmelCase , prefix=UpperCAmelCase , parent=self , width=UpperCAmelCase )
return self.child_bar
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
__lowerCAmelCase: Tuple = None
self.display()
class A_ ( snake_case__ ):
def __init__( self : Any ) -> List[str]:
__lowerCAmelCase: int = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: str = False
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , **UpperCAmelCase : Tuple ) -> str:
__lowerCAmelCase: Tuple = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
__lowerCAmelCase: Optional[int] = 0
__lowerCAmelCase: Any = 0
__lowerCAmelCase: Tuple = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
__lowerCAmelCase: List[Any] = NotebookTrainingTracker(state.max_steps , UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Union[str, Any] ) -> Any:
__lowerCAmelCase: Union[str, Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__lowerCAmelCase: Any = False
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=None , **UpperCAmelCase : Dict ) -> List[Any]:
if not has_length(UpperCAmelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__lowerCAmelCase: int = self.training_tracker.add_child(len(UpperCAmelCase ) )
else:
__lowerCAmelCase: List[str] = NotebookProgressBar(len(UpperCAmelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ) -> Union[str, Any]:
if self.prediction_bar is not None:
self.prediction_bar.close()
__lowerCAmelCase: Any = None
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=None , **UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__lowerCAmelCase: Union[str, Any] = {'Training Loss': logs['loss']}
# First column is necessarily Step since we're not in epoch eval strategy
__lowerCAmelCase: Dict = state.global_step
self.training_tracker.write_line(UpperCAmelCase )
def UpperCAmelCase ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple=None , **UpperCAmelCase : int ) -> List[str]:
if self.training_tracker is not None:
__lowerCAmelCase: Dict = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history ):
if "loss" in log:
__lowerCAmelCase: List[str] = log['loss']
break
if self.first_column == "Epoch":
__lowerCAmelCase: int = int(state.epoch )
else:
__lowerCAmelCase: Tuple = state.global_step
__lowerCAmelCase: Optional[int] = 'eval'
for k in metrics:
if k.endswith('_loss' ):
__lowerCAmelCase: Union[str, Any] = re.sub(R'\_loss$' , '' , UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = metrics.pop('total_flos' , UpperCAmelCase )
__lowerCAmelCase: str = metrics.pop('epoch' , UpperCAmelCase )
__lowerCAmelCase: int = metrics.pop(F'''{metric_key_prefix}_runtime''' , UpperCAmelCase )
__lowerCAmelCase: List[Any] = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , UpperCAmelCase )
__lowerCAmelCase: List[str] = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , UpperCAmelCase )
__lowerCAmelCase: Tuple = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , UpperCAmelCase )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__lowerCAmelCase: Tuple = v
else:
__lowerCAmelCase: int = k.split('_' )
__lowerCAmelCase: List[Any] = ' '.join([part.capitalize() for part in splits[1:]] )
__lowerCAmelCase: List[Any] = v
self.training_tracker.write_line(UpperCAmelCase )
self.training_tracker.remove_child()
__lowerCAmelCase: List[str] = None
# Evaluation takes a long time so we should force the next update.
__lowerCAmelCase: str = True
def UpperCAmelCase ( self : int , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ) -> Optional[int]:
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = None
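# Sketch: this mirrors transformers' NotebookProgressCallback, which Trainer
# enables automatically in notebooks; manual wiring would look roughly like
#   Trainer(..., callbacks=[NotebookProgressCallback()])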
| 322
| 0
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class a__ :
"""simple docstring"""
def __init__(self , __lowercase , __lowercase=99 , __lowercase=13 , __lowercase=16 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase=True , __lowercase=2 , __lowercase=32 , __lowercase=4 , __lowercase=4 , __lowercase=30 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=None , ):
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = decoder_seq_length
# For common tests
__lowerCAmelCase = self.decoder_seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_attention_mask
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = d_model
__lowerCAmelCase = d_model
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = decoder_start_token_id
__lowerCAmelCase = use_cache
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = None
__lowerCAmelCase = decoder_seq_length
__lowerCAmelCase = 2
__lowerCAmelCase = 1
def _snake_case (self ):
__lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_attention_mask:
__lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__lowerCAmelCase = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , ):
__lowerCAmelCase = True
__lowerCAmelCase = TrOCRDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
__lowerCAmelCase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__lowerCAmelCase = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
__lowerCAmelCase = model(_UpperCAmelCase )
__lowerCAmelCase = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) + 1 )
__lowerCAmelCase = outputs['''past_key_values''']
# create hypothetical next token and extend next_input_ids with it
__lowerCAmelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids
__lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCAmelCase = model(_UpperCAmelCase )['''last_hidden_state''']
__lowerCAmelCase = model(_UpperCAmelCase , past_key_values=_UpperCAmelCase )['''last_hidden_state''']
# select random slice
__lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCAmelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 )
def _snake_case (self ):
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class a__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : List[Any] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
__UpperCamelCase : Optional[Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
__UpperCamelCase : int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
__UpperCamelCase : int = True
__UpperCamelCase : Any = False
def _snake_case (self ):
__lowerCAmelCase = TrOCRStandaloneDecoderModelTester(self , is_training=_UpperCAmelCase )
__lowerCAmelCase = ConfigTester(self , config_class=_UpperCAmelCase )
def _snake_case (self ):
pass
def _snake_case (self ):
pass
def _snake_case (self ):
pass
def _snake_case (self ):
self.config_tester.run_common_tests()
def _snake_case (self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase )
def _snake_case (self ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def _snake_case (self ):
pass
| 352
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
__lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
__lowerCAmelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__lowerCAmelCase = {'''unk_token''': '''<unk>'''}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowercase ) )
__lowerCAmelCase = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowercase , __lowercase )
def _snake_case (self , **__lowercase ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def _snake_case (self , **__lowercase ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase )
def _snake_case (self , **__lowercase ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowercase )
def _snake_case (self ):
shutil.rmtree(self.tmpdirname )
def _snake_case (self ):
__lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase )
__lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowercase )
self.assertIsInstance(processor_fast.tokenizer , __lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowercase )
self.assertIsInstance(processor_fast.image_processor , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 )
__lowerCAmelCase = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' )
__lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = processor(text=__lowercase )
__lowerCAmelCase = tokenizer(__lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.batch_decode(__lowercase )
__lowerCAmelCase = tokenizer.batch_decode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
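# Usage sketch mirroring the tests above (hub checkpoint assumed for brevity):
from PIL import Image
import numpy as np
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image)  # input_ids, attention_mask, pixel_values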
| 9
| 0
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __lowerCAmelCase ( unittest.TestCase):
_a = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def SCREAMING_SNAKE_CASE ( self: Optional[Any] , _lowerCAmelCase: Dict , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Optional[Any] ):
lowercase :int = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
lowercase :List[str] = VideoClassificationPipeline(model=_lowerCAmelCase , image_processor=_lowerCAmelCase , top_k=2 )
lowercase :int = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def SCREAMING_SNAKE_CASE ( self: Dict , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Optional[Any] ):
for example in examples:
lowercase :Tuple = video_classifier(_lowerCAmelCase )
self.assertEqual(
_lowerCAmelCase , [
{"score": ANY(_lowerCAmelCase ), "label": ANY(_lowerCAmelCase )},
{"score": ANY(_lowerCAmelCase ), "label": ANY(_lowerCAmelCase )},
] , )
@require_torch
def SCREAMING_SNAKE_CASE ( self: Dict ):
lowercase :List[Any] = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowercase :Optional[Any] = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
lowercase :Dict = pipeline(
"video-classification" , model=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , frame_sampling_rate=4 )
lowercase :Any = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
lowercase :Optional[int] = video_classifier(_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}] , )
lowercase :Any = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
[{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}],
[{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}],
] , )
@require_tf
def SCREAMING_SNAKE_CASE ( self: List[str] ):
pass
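# Usage sketch matching the torch test above (checkpoint and demo clip come
# straight from the test body):
clf = pipeline("video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification")
video_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
preds = clf(video_path, top_k=2)  # [{"score": ..., "label": ...}, ...]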
| 236
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
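

# Usage sketch (illustrative; "sentencepiece.bpe.model" is a placeholder path
# to a local SentencePiece model). It shows the special-token layout the
# methods above implement: <s> A </s> for one sequence,
# <s> A </s></s> B </s> for a pair.
if __name__ == "__main__":
    tok = CamembertTokenizer("sentencepiece.bpe.model")
    single = tok.build_inputs_with_special_tokens([10, 11, 12])
    pair = tok.build_inputs_with_special_tokens([10, 11], [12, 13])
    print(single)  # [cls_id, 10, 11, 12, sep_id]
    print(pair)    # [cls_id, 10, 11, sep_id, sep_id, 12, 13, sep_id]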
| 236
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
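

# Non-interactive sketch (illustrative): the same SageMakerConfig the wizard
# above builds can be constructed directly. The field values below are
# placeholders, not recommendations.
def _example_sagemaker_config():
    return SageMakerConfig(
        image_uri=None,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=SageMakerDistributedType.NO,
        use_cpu=False,
        dynamo_config={},
        ec2_instance_type="ml.p3.2xlarge",
        profile="default",
        region="us-east-1",
        iam_role_name="accelerate_sagemaker_execution_role",
        mixed_precision="no",
        num_machines=1,
        sagemaker_inputs_file=None,
        sagemaker_metrics_file=None,
    )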
| 365
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset([])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
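

# Usage sketch (illustrative): the minimal inpainting call the tests above
# verify. The device is a placeholder; requires a CUDA GPU and network access
# to download the checkpoint.
def _example_inpaint(init_image, mask_image):
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
    ).to("cuda")
    return pipe(
        prompt="Face of a yellow cat, high resolution, sitting on a park bench",
        image=init_image,
        mask_image=mask_image,
    ).images[0]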
| 267
| 0
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
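

# Programmatic sketch (illustrative): encrypting and decrypting with a fixed
# 2x2 key instead of the interactive prompts in main(). The key [[2, 5], [1, 6]]
# has determinant 7, which is coprime with 36, so it is a valid encryption key.
def _hill_cipher_demo() -> None:
    cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
    encrypted = cipher.encrypt("testmessage")
    print(encrypted)
    print(cipher.decrypt(encrypted))  # recovered (uppercased, possibly padded) message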
| 212
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                # ignore Adam optimizer state
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
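
# Example invocation (illustrative; the script filename and both paths are
# placeholders for the converter above and a local Mesh-TensorFlow checkpoint):
#
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./gptsan_tf_checkpoint --output ./gptsan_model.pt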
| 66
| 0
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an .xz (LZMA) file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a .zst file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
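

# Usage sketch (illustrative; "data.txt.gz" is a placeholder for a local gzip
# file). Each filesystem exposes the single decompressed member, reachable
# under its uncompressed name.
if __name__ == "__main__":
    fs = GzipFileSystem("data.txt.gz")
    with fs.open("data.txt") as f:  # the uncompressed member name
        print(f.read())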
| 371
|
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b with the Russian-peasant (binary) method, using only
    additions and bit shifts."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Same scheme as binary_multiply, but keeps every partial sum reduced
    modulo `modulus`."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
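

# Quick demonstration (illustrative): both helpers agree with the builtin
# operators for small inputs.
if __name__ == "__main__":
    assert binary_multiply(6, 7) == 42
    assert binary_mod_multiply(6, 7, 5) == (6 * 7) % 5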
| 14
| 0
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self):
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
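
# Usage sketch (illustrative): the structure is circular, so iteration wraps
# exactly once around the ring.
#
#   cll = CircularLinkedList()
#   for value in (1, 2, 3):
#       cll.insert_tail(value)
#   print(cll)                        # 1->2->3
#   print(cll.tail.next is cll.head)  # True: the tail points back to the head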
| 90
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
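

# Usage sketch (illustrative; requires the transformers vision extras, a
# Tesseract install for apply_ocr=True, and a PIL document image). With OCR
# enabled, the image processor supplies words and boxes, so only the image
# needs to be passed.
#
#   from transformers import LayoutLMv3ImageProcessor, LayoutLMv3TokenizerFast
#   processor = LayoutLMv3Processor(
#       LayoutLMv3ImageProcessor(apply_ocr=True),
#       LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base"),
#   )
#   encoding = processor(document_image, return_tensors="pt")
#   print(encoding.keys())  # input_ids, bbox, attention_mask, pixel_values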
| 63
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 17
|
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
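

# Usage sketch (illustrative): temporarily replace `os.path.join` as seen from
# a module object, without touching the real `os.path`. `some_module` is a
# placeholder for any module that does `import os`.
#
#   def fake_join(*parts):
#       return "/".join(parts)
#
#   with patch_submodule(some_module, "os.path.join", fake_join):
#       assert some_module.os.path.join("a", "b") == "a/b"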
| 17
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 2
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = TransfoXLTokenizer
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def __magic_name__( self :str ) -> Dict:
super().setUp()
__SCREAMING_SNAKE_CASE : List[str] = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
__SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __magic_name__( self :Any , **lowerCAmelCase__ :int ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = '''<unk> UNwanted , running'''
__SCREAMING_SNAKE_CASE : List[str] = '''<unk> unwanted, running'''
return input_text, output_text
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(lowerCAmelCase__ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [0, 4, 8, 7] )
def __magic_name__( self :Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = TransfoXLTokenizer(lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def __magic_name__( self :Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = TransfoXLTokenizer(lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __magic_name__( self :Dict ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[str] = TransfoXLTokenizer(lower_case=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
__SCREAMING_SNAKE_CASE : Optional[int] = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(tokenizer.convert_tokens_to_string(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __magic_name__( self :str ) -> int:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = len(lowerCAmelCase__ )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(lowerCAmelCase__ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
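# Aside: the "@-@" / "@,@" / "@.@" tokens checked above follow the WikiText
# detokenization convention for intra-word punctuation. A toy round-trip sketch
# (illustrative only; the real tokenizer handles this in convert_tokens_to_string):
wikitext_form = "side @-@ scrolled and 5 @,@ 000"
detokenized = wikitext_form.replace(" @-@ ", "-").replace(" @,@ ", ",")
assert detokenized == "side-scrolled and 5,000"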
| 9
| 0
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__a = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[Any]:
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""" )
if not ops[op](version.parse(_UpperCamelCase ), version.parse(_UpperCamelCase ) ):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->None:
"""simple docstring"""
lowercase : List[str] = f"""\n{hint}""" if hint is not None else ''''''
# non-versioned check
if re.match(R'''^[\w_\-\d]+$''', _UpperCamelCase ):
lowercase : Any = requirement, None, None
else:
lowercase : int = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', _UpperCamelCase )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'''
f""" got {requirement}""" )
lowercase : int = match[0]
lowercase : Dict = want_full.split(''',''' ) # there could be multiple requirements
lowercase : Optional[Any] = {}
for w in want_range:
lowercase : Any = re.findall(R'''^([\s!=<>]{1,2})(.+)''', _UpperCamelCase )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'''
f""" but got {requirement}""" )
lowercase : Tuple = match[0]
lowercase : Any = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
lowercase : Tuple = '''.'''.join([str(_UpperCamelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
return
# check if any version is installed
try:
lowercase : int = importlib.metadata.version(_UpperCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
def __lowercase ( _UpperCamelCase ) ->Dict:
"""simple docstring"""
lowercase : str = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(_UpperCamelCase, _UpperCamelCase )
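# A minimal standalone check exercising the operator table `__a` above, using
# only `packaging` (the version strings are my own illustrative values):
from packaging import version as _version
assert __a[">="](_version.parse("4.0.0"), _version.parse("4.0"))
assert __a["!="](_version.parse("0.11.3"), _version.parse("0.11.1"))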
| 368
|
# Algorithm for the pigeonhole sorting
def __lowercase ( _UpperCamelCase ) ->List[Any]:
"""simple docstring"""
lowercase : List[Any] = min(_UpperCamelCase ) # min() finds the minimum value
lowercase : Union[str, Any] = max(_UpperCamelCase ) # max() finds the maximum value
lowercase : Tuple = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
lowercase : List[Any] = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(_UpperCamelCase, _UpperCamelCase ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
lowercase : Tuple = 0
for count in range(_UpperCamelCase ):
while holes[count] > 0:
holes[count] -= 1
lowercase : str = count + min_val
i += 1
def __lowercase ( ) ->List[str]:
"""simple docstring"""
lowercase : Union[str, Any] = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(_UpperCamelCase )
print('''Sorted order is:''', ''' '''.join(str(x) for x in _UpperCamelCase ) )
if __name__ == "__main__":
main()
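# De-obfuscated sketch of the same pigeonhole idea, returning a new list
# instead of sorting in place (function and variable names are mine):
def pigeonhole_sorted(values: list[int]) -> list[int]:
    if not values:
        return []
    lo, hi = min(values), max(values)
    holes = [0] * (hi - lo + 1)
    for v in values:
        holes[v - lo] += 1          # drop each value into its pigeonhole
    result: list[int] = []
    for offset, count in enumerate(holes):
        result.extend([lo + offset] * count)
    return result

assert pigeonhole_sorted([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]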
| 173
| 0
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class snake_case :
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase : str , ) -> Tuple:
"""simple docstring"""
_snake_case : List[Any] = parent
_snake_case : Any = 13
_snake_case : Union[str, Any] = 7
_snake_case : Union[str, Any] = True
_snake_case : List[str] = True
_snake_case : List[str] = True
_snake_case : Tuple = 99
_snake_case : Any = 32
_snake_case : Optional[int] = 2
_snake_case : List[str] = 4
_snake_case : Union[str, Any] = 37
_snake_case : Any = """gelu"""
_snake_case : Dict = 0.1
_snake_case : Tuple = 0.1
_snake_case : Optional[Any] = 512
_snake_case : Optional[Any] = 16
_snake_case : Dict = 2
_snake_case : str = 0.02
_snake_case : List[Any] = 3
_snake_case : int = 4
_snake_case : Tuple = None
def UpperCamelCase_ ( self : Optional[Any]) -> str:
"""simple docstring"""
_snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_snake_case : List[Any] = None
if self.use_input_mask:
_snake_case : Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
_snake_case : Optional[int] = None
_snake_case : Optional[int] = None
_snake_case : Optional[Any] = None
if self.use_labels:
_snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
_snake_case : Dict = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.prepare_config_and_inputs()
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
_snake_case : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : int) -> List[Any]:
"""simple docstring"""
_snake_case : List[Any] = TFEsmModel(config=__SCREAMING_SNAKE_CASE)
_snake_case : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_snake_case : str = model(__SCREAMING_SNAKE_CASE)
_snake_case : Optional[int] = [input_ids, input_mask]
_snake_case : str = model(__SCREAMING_SNAKE_CASE)
_snake_case : Optional[Any] = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] , ) -> int:
"""simple docstring"""
_snake_case : List[Any] = True
_snake_case : int = TFEsmModel(config=__SCREAMING_SNAKE_CASE)
_snake_case : Optional[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
_snake_case : List[str] = model(__SCREAMING_SNAKE_CASE)
_snake_case : Optional[Any] = [input_ids, input_mask]
_snake_case : str = model(__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE)
# Also check the case where encoder outputs are not passed
_snake_case : int = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase_ ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : List[Any]) -> int:
"""simple docstring"""
_snake_case : Optional[Any] = TFEsmForMaskedLM(config=__SCREAMING_SNAKE_CASE)
_snake_case : str = model([input_ids, input_mask])
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
_snake_case : List[Any] = self.num_labels
_snake_case : str = TFEsmForTokenClassification(config=__SCREAMING_SNAKE_CASE)
_snake_case : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_snake_case : Any = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase_ ( self : List[Any]) -> int:
"""simple docstring"""
_snake_case : Optional[int] = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class snake_case ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
snake_case_ : Tuple = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case_ : Any = (
{
"""feature-extraction""": TFEsmModel,
"""fill-mask""": TFEsmForMaskedLM,
"""text-classification""": TFEsmForSequenceClassification,
"""token-classification""": TFEsmForTokenClassification,
"""zero-shot""": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ : List[Any] = False
snake_case_ : Tuple = False
def UpperCamelCase_ ( self : Dict) -> Any:
"""simple docstring"""
_snake_case : Optional[Any] = TFEsmModelTester(self)
_snake_case : str = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37)
def UpperCamelCase_ ( self : Optional[Any]) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : List[str]) -> List[str]:
"""simple docstring"""
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self : Any) -> Optional[Any]:
"""simple docstring"""
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self : Optional[Any]) -> int:
"""simple docstring"""
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE)
@slow
def UpperCamelCase_ ( self : str) -> int:
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = TFEsmModel.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
@unittest.skip("""Protein models do not support embedding resizing.""")
def UpperCamelCase_ ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""Protein models do not support embedding resizing.""")
def UpperCamelCase_ ( self : List[Any]) -> Tuple:
"""simple docstring"""
pass
def UpperCamelCase_ ( self : str) -> Tuple:
"""simple docstring"""
_snake_case , _snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(__SCREAMING_SNAKE_CASE)
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_snake_case : Dict = model.get_bias()
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
for k, v in name.items():
assert isinstance(__SCREAMING_SNAKE_CASE , tf.Variable)
else:
_snake_case : str = model.get_output_embeddings()
assert x is None
_snake_case : str = model.get_bias()
assert name is None
@require_tf
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : Tuple) -> Any:
"""simple docstring"""
_snake_case : str = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""")
_snake_case : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]])
_snake_case : Union[str, Any] = model(__SCREAMING_SNAKE_CASE)[0]
_snake_case : Union[str, Any] = [1, 6, 33]
self.assertEqual(list(output.numpy().shape) , __SCREAMING_SNAKE_CASE)
# compare the actual values for a slice.
_snake_case : List[str] = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2))
@slow
def UpperCamelCase_ ( self : Optional[Any]) -> str:
"""simple docstring"""
_snake_case : int = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""")
_snake_case : Optional[Any] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
_snake_case : Optional[Any] = model(__SCREAMING_SNAKE_CASE)[0]
# compare the actual values for a slice.
_snake_case : List[Any] = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
| 317
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase : List[str] = logging.get_logger(__name__)
class lowerCAmelCase__ ( a , a ):
"""simple docstring"""
lowerCAmelCase__ = "maskformer-swin"
lowerCAmelCase__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : str , __SCREAMING_SNAKE_CASE : Tuple=224 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=96 , __SCREAMING_SNAKE_CASE : Optional[Any]=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE : Any=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Dict=4.0 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : Optional[int]=1E-5 , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Dict=None , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Tuple:
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = embed_dim
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = num_heads
__SCREAMING_SNAKE_CASE = window_size
__SCREAMING_SNAKE_CASE = mlp_ratio
__SCREAMING_SNAKE_CASE = qkv_bias
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = drop_path_rate
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = use_absolute_embeddings
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__SCREAMING_SNAKE_CASE = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__SCREAMING_SNAKE_CASE = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
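# Worked example of the channel-dimension formula above, using this config's
# defaults (embed_dim=96, depths=[2, 2, 6, 2]); the helper variables are mine:
_embed_dim, _depths = 96, [2, 2, 6, 2]
assert int(_embed_dim * 2 ** (len(_depths) - 1)) == 768  # doubled at each of 3 downsamples
assert ["stem"] + [f"stage{i}" for i in range(1, len(_depths) + 1)] == [
    "stem", "stage1", "stage2", "stage3", "stage4"
]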
| 267
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a :Union[str, Any] = logging.get_logger(__name__)
a :Optional[int] = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __a (a__):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Tuple = """gpt_neox"""
def __init__( self , _a=50_432 , _a=6_144 , _a=44 , _a=64 , _a=24_576 , _a="gelu" , _a=0.25 , _a=10_000 , _a=0.0 , _a=0.0 , _a=0.1 , _a=2_048 , _a=0.02 , _a=1E-5 , _a=True , _a=0 , _a=2 , _a=False , _a=True , _a=None , **_a , ) -> Tuple:
"""simple docstring"""
super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
SCREAMING_SNAKE_CASE__ : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = rotary_pct
SCREAMING_SNAKE_CASE__ : List[str] = rotary_emb_base
SCREAMING_SNAKE_CASE__ : Dict = attention_dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_dropout
SCREAMING_SNAKE_CASE__ : Tuple = classifier_dropout
SCREAMING_SNAKE_CASE__ : Dict = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : str = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = tie_word_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = use_parallel_residual
SCREAMING_SNAKE_CASE__ : Optional[int] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowerCAmelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
SCREAMING_SNAKE_CASE__ : str = self.rope_scaling.get("""type""" , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rope_scaling.get("""factor""" , lowerCAmelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
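# Standalone sketch of what the validation above accepts; the example dict is
# mine, not from the source:
_rope_scaling = {"type": "linear", "factor": 2.0}
assert isinstance(_rope_scaling, dict) and len(_rope_scaling) == 2
assert _rope_scaling.get("type") in ("linear", "dynamic")
assert isinstance(_rope_scaling.get("factor"), float) and _rope_scaling["factor"] > 1.0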
| 362
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a :List[str] = logging.get_logger(__name__)
a :Union[str, Any] = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[str] = """audio-spectrogram-transformer"""
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-1_2 , _a=16 , _a=True , _a=10 , _a=10 , _a=1_024 , _a=128 , **_a , ) -> List[Any]:
"""simple docstring"""
super().__init__(**_a )
SCREAMING_SNAKE_CASE__ : Dict = hidden_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[Any] = patch_size
SCREAMING_SNAKE_CASE__ : Dict = qkv_bias
SCREAMING_SNAKE_CASE__ : Any = frequency_stride
SCREAMING_SNAKE_CASE__ : int = time_stride
SCREAMING_SNAKE_CASE__ : int = max_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_mel_bins
| 56
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__SCREAMING_SNAKE_CASE :Optional[int] = TypeVar('''T''')
class A_ ( Generic[T] ):
def __init__( self : List[Any] , snake_case_ : list[T] , snake_case_ : Callable[[T, T], T] ):
_UpperCAmelCase = None
_UpperCAmelCase = len(snake_case_ )
_UpperCAmelCase = [any_type for _ in range(self.N )] + arr
_UpperCAmelCase = fnc
self.build()
def lowercase ( self : List[Any] ):
for p in range(self.N - 1 , 0 , -1 ):
_UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : T ):
p += self.N
_UpperCAmelCase = v
while p > 1:
_UpperCAmelCase = p // 2
_UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase ( self : Any , snake_case_ : int , snake_case_ : int ): # noqa: E741
_UpperCAmelCase , _UpperCAmelCase = l + self.N, r + self.N
_UpperCAmelCase = None
while l <= r:
if l % 2 == 1:
_UpperCAmelCase = self.st[l] if res is None else self.fn(snake_case_ , self.st[l] )
if r % 2 == 0:
_UpperCAmelCase = self.st[r] if res is None else self.fn(snake_case_ , self.st[r] )
_UpperCAmelCase , _UpperCAmelCase = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
__SCREAMING_SNAKE_CASE :Union[str, Any] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__SCREAMING_SNAKE_CASE :List[str] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
__SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, min)
__SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, max)
__SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, lambda a, b: a + b)
def UpperCAmelCase_ ( ) -> None:
'''simple docstring'''
for i in range(len(__lowercase ) ):
for j in range(__lowercase , len(__lowercase ) ):
_UpperCAmelCase = reduce(__lowercase , test_array[i : j + 1] )
_UpperCAmelCase = reduce(__lowercase , test_array[i : j + 1] )
_UpperCAmelCase = reduce(lambda a , b : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__lowercase , __lowercase )
assert max_range == max_segment_tree.query(__lowercase , __lowercase )
assert sum_range == sum_segment_tree.query(__lowercase , __lowercase )
test_all_segments()
for index, value in test_updates.items():
__SCREAMING_SNAKE_CASE :str = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
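# Core array layout behind the SegmentTree above, shown standalone: leaves sit
# at indices N..2N-1 and internal node p combines its children 2p and 2p+1
# (variable names are mine; N is a power of two so the tree is perfect):
_leaves = [1, 10, -2, 9, -3, 8, 4, -7]
_N = len(_leaves)
_st = [0] * _N + _leaves
for _p in range(_N - 1, 0, -1):
    _st[_p] = min(_st[2 * _p], _st[2 * _p + 1])  # same step as the build() loop
assert _st[1] == min(_leaves) == -7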
| 22
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : List[str] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14
| 0
|
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class __UpperCAmelCase :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
if dst_width < 0 or dst_height < 0:
raise ValueError('''Destination width/height should be > 0''' )
A_ = img
A_ = img.shape[1]
A_ = img.shape[0]
A_ = dst_width
A_ = dst_height
A_ = self.src_w / self.dst_w
A_ = self.src_h / self.dst_h
A_ = A_ = (
np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
)
def __A ( self ) -> Optional[Any]:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
A_ = self.img[self.get_y(_SCREAMING_SNAKE_CASE )][self.get_x(_SCREAMING_SNAKE_CASE )]
def __A ( self , _SCREAMING_SNAKE_CASE ) -> int:
return int(self.ratio_x * x )
def __A ( self , _SCREAMING_SNAKE_CASE ) -> int:
return int(self.ratio_y * y )
if __name__ == "__main__":
__snake_case : List[str] = 800, 600
__snake_case : int = imread('image_data/lena.jpg', 1)
__snake_case : Union[str, Any] = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
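# A cv2-free sketch of the same nearest-neighbour mapping, using numpy fancy
# indexing (shapes assumed H x W x C; the function name is mine):
def _nn_resize(img, dst_w: int, dst_h: int):
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)  # get_y for every row
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)  # get_x for every column
    return img[ys[:, None], xs[None, :]]  # broadcasts to (dst_h, dst_w, C)

_small = _nn_resize(np.zeros((600, 800, 3), dtype=np.uint8), 400, 300)
assert _small.shape == (300, 400, 3)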
| 365
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Optional[int] = 'mgp-str'
def __init__( self , _SCREAMING_SNAKE_CASE=[32, 128] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=27 , _SCREAMING_SNAKE_CASE=38 , _SCREAMING_SNAKE_CASE=5_0257 , _SCREAMING_SNAKE_CASE=3_0522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
super().__init__(**_SCREAMING_SNAKE_CASE )
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = max_token_length
A_ = num_character_labels
A_ = num_bpe_labels
A_ = num_wordpiece_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = mlp_ratio
A_ = distilled
A_ = layer_norm_eps
A_ = drop_rate
A_ = qkv_bias
A_ = attn_drop_rate
A_ = drop_path_rate
A_ = output_aa_attentions
A_ = initializer_range
| 18
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_a = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 17
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
_a = datasets.utils.logging.get_logger(__name__)
_a = ['names', 'prefix']
_a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_a = ['encoding_errors', 'on_bad_lines']
_a = ['date_format']
@dataclass
class _lowerCAmelCase ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCAmelCase : str = ","
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[Union[int, List[int], str]] = "infer"
__UpperCAmelCase : Optional[List[str]] = None
__UpperCAmelCase : Optional[List[str]] = None
__UpperCAmelCase : Optional[Union[int, str, List[int], List[str]]] = None
__UpperCAmelCase : Optional[Union[List[int], List[str]]] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : Optional[Literal["c", "python", "pyarrow"]] = None
__UpperCAmelCase : Dict[Union[int, str], Callable[[Any], Any]] = None
__UpperCAmelCase : Optional[list] = None
__UpperCAmelCase : Optional[list] = None
__UpperCAmelCase : bool = False
__UpperCAmelCase : Optional[Union[int, List[int]]] = None
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[Union[str, List[str]]] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = False
__UpperCAmelCase : bool = True
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : str = "."
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : str = '"'
__UpperCAmelCase : int = 0
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = True
__UpperCAmelCase : int = 0
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = False
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : int = 1_0_0_0_0
__UpperCAmelCase : Optional[datasets.Features] = None
__UpperCAmelCase : Optional[str] = "strict"
__UpperCAmelCase : Literal["error", "warn", "skip"] = "error"
__UpperCAmelCase : Optional[str] = None
def _lowercase ( self : Tuple ):
if self.delimiter is not None:
__lowercase = self.delimiter
if self.column_names is not None:
__lowercase = self.column_names
@property
def _lowercase ( self : Union[str, Any] ):
__lowercase = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), UpperCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class _lowerCAmelCase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__UpperCAmelCase : Tuple = CsvConfig
def _lowercase ( self : List[str] ):
return datasets.DatasetInfo(features=self.config.features )
def _lowercase ( self : List[Any], UpperCAmelCase__ : Dict ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__lowercase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase__, (str, list, tuple) ):
__lowercase = data_files
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = [files]
__lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files} )]
__lowercase = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = [files]
__lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase__, gen_kwargs={"files": files} ) )
return splits
def _lowercase ( self : Dict, UpperCAmelCase__ : pa.Table ):
if self.config.features is not None:
__lowercase = self.config.features.arrow_schema
if all(not require_storage_cast(UpperCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
__lowercase = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=UpperCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
__lowercase = table_cast(UpperCAmelCase__, UpperCAmelCase__ )
return pa_table
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : List[str] ):
__lowercase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
__lowercase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(UpperCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__ ) ):
__lowercase = pd.read_csv(UpperCAmelCase__, iterator=UpperCAmelCase__, dtype=UpperCAmelCase__, **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(UpperCAmelCase__ ):
__lowercase = pa.Table.from_pandas(UpperCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase__ )}: {e}""" )
raise
| 17
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCamelCase : Dict = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
__UpperCamelCase : Dict = {"mobilebert-uncased": 512}
__UpperCamelCase : Tuple = {}
class a ( UpperCamelCase_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = MobileBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , tokenize_chinese_chars=_a , strip_accents=_a , **_a , )
lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _a ) != do_lower_case
or normalizer_state.get('strip_accents' , _a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _a ) != tokenize_chinese_chars
):
lowerCAmelCase = getattr(_a , normalizer_state.pop('type' ) )
lowerCAmelCase = do_lower_case
lowerCAmelCase = strip_accents
lowerCAmelCase = tokenize_chinese_chars
lowerCAmelCase = normalizer_class(**_a )
lowerCAmelCase = do_lower_case
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
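# Sketch of the segment-id layout the second method above (originally
# create_token_type_ids_from_sequences) produces, on toy ids where 101/102
# stand in for [CLS]/[SEP]:
_cls, _sep = [101], [102]
_ids_a, _ids_b = [7, 8], [9]
assert len(_cls + _ids_a + _sep) * [0] + len(_ids_b + _sep) * [1] == [0, 0, 0, 0, 1, 1]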
| 362
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class a ( a__ ):
def __init__( self , *_snake_case , **_snake_case ):
"""simple docstring"""
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 309
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'spm_char.model'}
_a = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
_a = {
'microsoft/speecht5_asr': 10_24,
'microsoft/speecht5_tts': 10_24,
'microsoft/speecht5_vc': 10_24,
}
class _lowerCAmelCase ( UpperCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : str = VOCAB_FILES_NAMES
__UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ['input_ids', 'attention_mask']
def __init__( self : int, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Optional[int]="<s>", UpperCAmelCase__ : Optional[Any]="</s>", UpperCAmelCase__ : int="<unk>", UpperCAmelCase__ : Tuple="<pad>", UpperCAmelCase__ : Optional[Dict[str, Any]] = None, **UpperCAmelCase__ : Optional[int], ):
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase__, eos_token=UpperCAmelCase__, unk_token=UpperCAmelCase__, pad_token=UpperCAmelCase__, sp_model_kwargs=self.sp_model_kwargs, **UpperCAmelCase__, )
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
@property
def _lowercase ( self : int ):
return self.sp_model.get_piece_size()
def _lowercase ( self : List[str] ):
__lowercase = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : Any, UpperCAmelCase__ : Any ):
__lowercase = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs" ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : str ):
return self.sp_model.encode(UpperCAmelCase__, out_type=UpperCAmelCase__ )
def _lowercase ( self : Optional[int], UpperCAmelCase__ : List[Any] ):
return self.sp_model.piece_to_id(UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Any ):
__lowercase = self.sp_model.IdToPiece(UpperCAmelCase__ )
return token
def _lowercase ( self : str, UpperCAmelCase__ : Dict ):
__lowercase = []
__lowercase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCAmelCase__ ) + token
__lowercase = []
else:
current_sub_tokens.append(UpperCAmelCase__ )
out_string += self.sp_model.decode(UpperCAmelCase__ )
return out_string.strip()
def _lowercase ( self : Any, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any]=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _lowercase ( self : Optional[int], UpperCAmelCase__ : List[int], UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__, token_ids_a=UpperCAmelCase__, already_has_special_tokens=UpperCAmelCase__ )
__lowercase = [1]
if token_ids_a is None:
return ([0] * len(UpperCAmelCase__ )) + suffix_ones
return ([0] * len(UpperCAmelCase__ )) + ([0] * len(UpperCAmelCase__ )) + suffix_ones
def _lowercase ( self : str, UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[str] = None ):
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
UpperCAmelCase__, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__, "wb" ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
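# Sketch of the suffix/mask logic above: a single EOS id is appended per
# sequence and the special-tokens mask flags only that suffix (toy ids; the
# real eos id comes from the loaded vocab):
_token_ids = [7, 8, 9]
_eos_id = 2
assert _token_ids + [_eos_id] == [7, 8, 9, 2]
assert [0] * len(_token_ids) + [1] == [0, 0, 0, 1]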
| 17
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __magic_name__ ( *lowercase ):
if not isinstance(lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =list(lowercase )
for i in range(len(lowercase ) ):
SCREAMING_SNAKE_CASE_: Optional[Any] =None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =[
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(lowercase , lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __magic_name__ ( lowercase = None , lowercase = 128 ):
if function is None:
return functools.partial(lowercase , starting_batch_size=lowercase )
SCREAMING_SNAKE_CASE_: str =starting_batch_size
def decorator(*lowercase , **lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE_: Optional[int] =list(inspect.signature(lowercase ).parameters.keys() )
# Guard against user error
if len(lowercase ) < (len(lowercase ) + 1):
SCREAMING_SNAKE_CASE_: List[Any] =""", """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(lowercase , *lowercase , **lowercase )
except Exception as e:
if should_reduce_batch_size(lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
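# Torch-free sketch of the retry-with-halved-batch pattern implemented above;
# names are mine and MemoryError stands in for should_reduce_batch_size(...):
def _run_with_backoff(step, starting_batch_size: int = 128):
    bs = starting_batch_size
    while bs > 0:
        try:
            return step(bs)
        except MemoryError:
            bs //= 2          # halve and retry, like the decorator
    raise RuntimeError("No executable batch size found, reached zero.")

_calls = []
def _fake_step(bs):
    _calls.append(bs)
    if bs > 32:
        raise MemoryError
    return bs

assert _run_with_backoff(_fake_step) == 32 and _calls == [128, 64, 32]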
| 173
| 0
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
UpperCAmelCase_ = {
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
UpperCAmelCase_ = Dataset.from_dict(__snake_case )
return dataset
class a ( lowerCamelCase__ ):
'''simple docstring'''
def lowerCamelCase_ ( self : List[str] ):
UpperCAmelCase_ = get_dataset()
UpperCAmelCase_ = make_duplicate_clusters(__snake_case , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def lowerCamelCase_ ( self : Optional[Any] ):
UpperCAmelCase_ = get_dataset()
UpperCAmelCase_ , UpperCAmelCase_ = deduplicate_dataset(__snake_case )
self.assertEqual(len(__snake_case ) , 2 )
print(__snake_case )
self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , __snake_case )
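# The 0.85 passed above is presumably a Jaccard similarity threshold; a minimal
# token-set Jaccard on the fixture rows (helper name is mine):
def _jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)

assert _jaccard("a " * 20, "a " * 30) == 1.0   # the two rows that cluster
assert _jaccard("a " * 20, "b " * 7) == 0.0    # the row that does not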
| 356
|
import numpy as np
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : np.array ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
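# The expression above is algebraically tanh(x), since
# 2/(1+e^(-2x)) - 1 == (1-e^(-2x))/(1+e^(-2x)). A quick numerical check:
_v = np.array([-2.0, 0.0, 1.5])
assert np.allclose((2 / (1 + np.exp(-2 * _v))) - 1, np.tanh(_v))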
| 177
| 0
|
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
_UpperCamelCase : Optional[int] = TypeVar("KT")
_UpperCamelCase : Tuple = TypeVar("VT")
class UpperCAmelCase_ ( Generic[KT, VT]):
def __init__( self , a = "root" , a = None ) -> Dict:
lowercase__ : str = key
lowercase__ : int = value
lowercase__ : list[Node[KT, VT]] = []
def __repr__( self ) -> str:
return f"""Node({self.key}: {self.value})"""
@property
def _UpperCAmelCase ( self ) -> int:
return len(self.forward )
class UpperCAmelCase_ ( Generic[KT, VT]):
def __init__( self , a = 0.5 , a = 1_6 ) -> Any:
lowercase__ : Node[KT, VT] = Node[KT, VT]()
lowercase__ : Any = 0
lowercase__ : int = p
lowercase__ : Optional[Any] = max_level
def __str__( self ) -> str:
lowercase__ : Any = list(self )
if len(a ) == 0:
return f"""SkipList(level={self.level})"""
lowercase__ : Optional[int] = max((len(str(a ) ) for item in items) , default=4 )
lowercase__ : Dict = max(a , 4 ) + 4
lowercase__ : Dict = self.head
lowercase__ : Tuple = []
lowercase__ : Any = node.forward.copy()
lines.append(f"""[{node.key}]""".ljust(a , '-' ) + '* ' * len(a ) )
lines.append(' ' * label_size + '| ' * len(a ) )
while len(node.forward ) != 0:
lowercase__ : Union[str, Any] = node.forward[0]
lines.append(
f"""[{node.key}]""".ljust(a , '-' )
+ ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
lines.append(' ' * label_size + '| ' * len(a ) )
lowercase__ : str = node.forward
lines.append('None'.ljust(a ) + '* ' * len(a ) )
return f"""SkipList(level={self.level})\n""" + "\n".join(a )
def __iter__( self ) -> Any:
lowercase__ : Dict = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
lowercase__ : List[str] = node.forward[0]
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[Any] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def _UpperCAmelCase ( self , a ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
lowercase__ : Optional[int] = []
lowercase__ : Tuple = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
lowercase__ : Dict = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(a )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def _UpperCAmelCase ( self , a ) -> Dict:
lowercase__ , lowercase__ : Optional[Any] = self._locate_node(a )
if node is not None:
for i, update_node in enumerate(a ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
lowercase__ : List[str] = node.forward[i]
else:
lowercase__ : Optional[int] = update_node.forward[:i]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ , lowercase__ : str = self._locate_node(a )
if node is not None:
lowercase__ : Optional[int] = value
else:
lowercase__ : List[str] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , a ):
update_vector.append(self.head )
lowercase__ : List[Any] = level
lowercase__ : Tuple = Node(a , a )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(a )
else:
lowercase__ : str = new_node
def _UpperCAmelCase ( self , a ) -> VT | None:
lowercase__ , lowercase__ : Optional[Any] = self._locate_node(a )
if node is not None:
return node.value
return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
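# Quick usage sketch (illustrative, assuming the SkipList class above):
#
#   sl = SkipList()
#   for k, v in [("a", 1), ("b", 2), ("c", 3)]:
#       sl.insert(k, v)
#   assert sl.find("b") == 2
#   assert list(sl) == ["a", "b", "c"]
#
# Expected cost of find/insert/delete is O(log n) on average, O(n) worst case.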
| 77
|
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
            expected_output_slice = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
            expected_output_slice = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
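# Note (illustrative): gradient checkpointing trades compute for memory by
# re-running forward activations during the backward pass; the test above
# verifies that enabling it changes neither the loss nor any parameter
# gradient beyond tight tolerances (1e-5 for the loss, 5e-5 for gradients).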
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model
    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
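# Note (illustrative): the encode test above checks the classic Stable
# Diffusion latent geometry: a (N, 3, 512, 512) image maps to a
# (N, 4, 64, 64) latent, i.e. each spatial dimension shrinks by the VAE's
# 8x downsampling factor, as encoded in `[i // 8 for i in image.shape[2:]]`.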
| 56
| 0
|
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Return the least value of M such that the number of cuboids up to
    M x M x M with an integer shortest surface path first exceeds ``limit``
    (Project Euler problem 86).
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F'{solution() = }')
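# Worked example (illustrative, not part of the original solution): for a
# 6 x 5 x 3 cuboid the shortest surface path unfolds into the hypotenuse of a
# right triangle with legs max_cuboid_size and sum_shortest_sides:
#   sqrt(6**2 + (5 + 3)**2) == sqrt(100) == 10, an integer.
# The inner loop counts, for each such sum, how many (a, b) splits with
# a <= b <= max_cuboid_size realise it.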
| 145
|
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod using O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701

a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
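# Why this works (illustrative check): by Fermat's little theorem, for a prime
# p and b not divisible by p, b ** (p - 2) % p is the modular inverse of b,
# so division mod p becomes a single multiplication:
#
#   inv = binary_exponentiation(b, p - 2, p)
#   assert (b * inv) % p == 1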
| 145
| 1
|
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """The heuristic here is the Manhattan Distance."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        """Search for the path; if none is found, only the start position is returned."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors (both in the grid and free spaces)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parents to parents until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
__snake_case = GreedyBestFirst(init, goal)
__snake_case = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__snake_case = 2
for elem in grid:
print(elem)
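# Note (illustrative): unlike A*, greedy best-first search sorts the open list
# purely by the Manhattan heuristic computed in calculate_heuristic and
# ignores the accumulated g_cost, so the path it prints is not guaranteed to
# be the shortest one. E.g. the start node (0, 0) with goal (6, 6) gets
# f_cost == abs(0 - 6) + abs(0 - 6) == 12.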
| 348
|
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start: int, end: int, val: int, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
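# Worked example (illustrative): with fn = operator.add the tree answers
# range-sum queries in O(log n). After arr.update(1, 5) the array becomes
# [2, 5, 5, 3, 4], so arr.query_range(1, 3) == 5 + 5 + 3 == 13, matching the
# expected outputs noted in the comments above.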
| 18
| 0
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"
@property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
@property
    def input_shape(self):
        return (4, 8)
@property
    def output_shape(self):
        return (4, 8)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)
    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
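# Note (illustrative): in unCLIP/Kandinsky pipelines the prior transformer maps
# a CLIP text embedding (the proj_embedding plus the 77-token
# encoder_hidden_states above) to a CLIP image embedding of the same width
# (768 here); the parameterized cases pin a fixed-seed output slice so that
# regressions in the pretrained weights or the forward pass are caught.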
| 359
|
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    """Find the longest common substring of text1 and text2 via dynamic programming."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
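# Illustrative trace (not part of the original file):
#
#   longest_common_substring("abcdef", "xabded")  # -> "ab"
#
# dp[i][j] holds the length of the longest common suffix of text1[:i] and
# text2[:j]; (ans_index, ans_length) remember where the best suffix ends, and
# the answer is sliced back out of text1 once the table is filled.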
| 270
| 0
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCamelCase__ :
'''simple docstring'''
pass
| 48
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : str = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
_lowerCAmelCase : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case_ )
_lowerCAmelCase : List[Any] = self.lang_token_to_id[lang_token]
_lowerCAmelCase : Any = [self.cur_lang_id]
_lowerCAmelCase : Any = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = self.get_lang_token(snake_case_ )
_lowerCAmelCase : int = self.lang_token_to_id[lang_token]
_lowerCAmelCase : str = [self.cur_lang_id]
_lowerCAmelCase : str = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
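# Minimal usage sketch (illustrative; requires network access to the Hub):
#
#   from transformers import M2M100Tokenizer
#   tok = M2M100Tokenizer.from_pretrained(
#       "facebook/m2m100_418M", src_lang="en", tgt_lang="fr"
#   )
#   batch = tok("Hello world", return_tensors="pt")
#
# Reassigning tok.src_lang later re-runs set_src_lang_special_tokens via the
# property setter above, so the language prefix token stays in sync.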
| 309
| 0
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ctrl""": 256,
}
CONTROL_CODES = {
"""Pregnancy""": 168_629,
"""Christianity""": 7_675,
"""Explain""": 106_423,
"""Fitness""": 63_440,
"""Saving""": 63_163,
"""Ask""": 27_171,
"""Ass""": 95_985,
"""Joke""": 163_509,
"""Questions""": 45_622,
"""Thoughts""": 49_605,
"""Retail""": 52_342,
"""Feminism""": 164_338,
"""Writing""": 11_992,
"""Atheism""": 192_263,
"""Netflix""": 48_616,
"""Computing""": 39_639,
"""Opinion""": 43_213,
"""Alone""": 44_967,
"""Funny""": 58_917,
"""Gaming""": 40_358,
"""Human""": 4_088,
"""India""": 1_331,
"""Joker""": 77_138,
"""Diet""": 36_206,
"""Legal""": 11_859,
"""Norman""": 4_939,
"""Tip""": 72_689,
"""Weight""": 52_343,
"""Movies""": 46_273,
"""Running""": 23_425,
"""Science""": 2_090,
"""Horror""": 37_793,
"""Confession""": 60_572,
"""Finance""": 12_250,
"""Politics""": 16_360,
"""Scary""": 191_985,
"""Support""": 12_654,
"""Technologies""": 32_516,
"""Teenage""": 66_160,
"""Event""": 32_769,
"""Learned""": 67_460,
"""Notion""": 182_770,
"""Wikipedia""": 37_583,
"""Books""": 6_665,
"""Extract""": 76_050,
"""Confessions""": 102_701,
"""Conspiracy""": 75_932,
"""Links""": 63_674,
"""Narcissus""": 150_425,
"""Relationship""": 54_766,
"""Relationships""": 134_796,
"""Reviews""": 41_671,
"""News""": 4_256,
"""Translation""": 26_820,
"""multilingual""": 128_406,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
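# Illustrative BPE walk-through (not part of the original file): get_pairs
# turns "hello" into {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}; bpe()
# then repeatedly merges the adjacent pair with the lowest rank in
# self.bpe_ranks until no ranked pair remains, marking every non-final subword
# with the "@@ " continuation suffix that convert_tokens_to_string() later
# strips back out.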
| 350
|
def partition(m: int) -> int:
    """Count the integer partitions of m using a bottom-up DP table."""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
UpperCAmelCase_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
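# Quick check (illustrative): partition(5) == 7, matching the seven integer
# partitions of 5:
#   5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1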
| 29
| 0
|
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'AI-Sweden/gpt-sw3-126m': 20_48,
'AI-Sweden/gpt-sw3-350m': 20_48,
'AI-Sweden/gpt-sw3-1.6b': 20_48,
'AI-Sweden/gpt-sw3-6.7b': 20_48,
'AI-Sweden/gpt-sw3-20b': 20_48,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
A : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
A : Optional[int] = kwargs.get('''name_or_path''' )
if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " if you are testing the model, this can safely be ignored"
            )
A : List[Any] = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
A : int = '''<|endoftext|>''' if eos_token is None else eos_token
A : Dict = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
A : List[Any] = unk_token if pad_token is None else pad_token
A : List[str] = eos_token if bos_token is None else bos_token
else:
A : Optional[Any] = '''<pad>''' if pad_token is None else pad_token
A : Optional[int] = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
A : List[str] = do_lower_case
A : List[str] = remove_space
A : Union[str, Any] = keep_accents
A : Union[str, Any] = vocab_file
A : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE )
# Used for whitespace normalization in input texts
        # fmt: off
A : Any = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
A : List[str] = re.compile(
F'[{"".join(map(SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' )
def __getstate__( self ) -> Dict:
"""simple docstring"""
A : Tuple = self.__dict__.copy()
A : List[Any] = None
return state
def __setstate__( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A : Any = {}
A : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return len(self.sp_model )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : List[str] = self.non_printing_characters_re.sub('''''' , SCREAMING_SNAKE_CASE )
# Normalize whitespaces
A : Union[str, Any] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
A : Optional[int] = unicodedata.normalize('''NFC''' , SCREAMING_SNAKE_CASE )
return text
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : Optional[int] = self.preprocess_text(SCREAMING_SNAKE_CASE )
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE )
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return out_string
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : Optional[int] = []
A : Optional[Any] = ''''''
A : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE ) + token
A : Dict = True
A : int = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE )
A : List[Any] = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE )
return out_string
def __lowerCAmelCase ( self ) -> Dict[str, int]:
"""simple docstring"""
A : Tuple = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A : Tuple = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
A : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : List[str] = self.preprocess_text(SCREAMING_SNAKE_CASE )
A : Any = self.sp_model.encode(SCREAMING_SNAKE_CASE )
else:
A : Any = [self.preprocess_text(SCREAMING_SNAKE_CASE ) for t in text]
A : Optional[int] = self.sp_model.encode(SCREAMING_SNAKE_CASE )
if return_tensors is True or return_tensors == "pt":
A : Union[str, Any] = torch.tensor(SCREAMING_SNAKE_CASE )
return token_ids
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self.sp_model.decode(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[int]:
"""simple docstring"""
A : int = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
A : Tuple = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(SCREAMING_SNAKE_CASE ) + F'{self.bos_token}Bot:'
)
return self.encode(text=SCREAMING_SNAKE_CASE )
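# Minimal usage sketch (illustrative; assumes the Hub checkpoints listed above
# are reachable):
#
#   tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tok("Hej världen!", return_tensors="pt")["input_ids"]
#   print(tok.decode(ids[0]))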
| 3
|
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model) -> None:
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
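# Example invocation (hypothetical script name and paths, not from the original source):
#
#     python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./funnel/model.ckpt \
#         --config_file ./funnel/config.json \
#         --pytorch_dump_path ./funnel/pytorch_model.bin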
| 177
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 355
|
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below `max_number`, using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count composites below `max_number` with exactly two prime factors (Project Euler 187)."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
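# Quick sanity check (worked out by hand, not from the original source): the
# semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26.
assert solution(30) == 10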
| 211
| 0
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
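# A minimal usage sketch (hypothetical file name, not from the original source):
#
#     from datasets import load_dataset
#     dataset = load_dataset("csv", data_files="my_data.csv", sep=";")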
| 145
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
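# A minimal usage sketch (not from the original source):
#
#     config = IBertConfig(quant_mode=True)
#     onnx_config = IBertOnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes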
| 145
| 1
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current stock price for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 164
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt, train_usr, train_mtch, tst_dt, tst_mtch) -> float:
    """First method: ordinary least squares on (date, match) features."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + tst_dt[0] * beta[1] + tst_mtch[0] + beta[2])


def sarimax_predictor(train_user, train_match, test_match) -> float:
    """Second method: SARIMAX with the match count as exogenous variable."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train, x_test, train_user) -> float:
    """Third method: support vector regressor with an RBF kernel."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user) -> float:
    """Optional outlier detector: lower limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote, actual_result) -> bool:
    """Vote on whether today's actual value looks safe given the forecasts."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 164
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
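# A minimal usage sketch (assumes `transformers` is installed; not from the original source):
#
#     from transformers import BertConfig
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#     assert config.to_dict()["model_type"] == "encoder-decoder"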
| 223
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : List[Any] = TypeVar("T")
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (position - 1) // 2
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (2 * position) + 1
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
return (2 * position) + 2
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : List[str] ) -> None:
__lowerCamelCase = []
__lowerCamelCase = {}
__lowerCamelCase = 0
def __len__( self : Optional[int] ) -> int:
return self.elements
def __repr__( self : Optional[int] ) -> str:
return str(self.heap )
def __A ( self : Union[str, Any] ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def __A ( self : str , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__lowerCamelCase = self.elements
self.elements += 1
self._bubble_up(SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__lowerCamelCase , __lowerCamelCase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__lowerCamelCase , __lowerCamelCase = self.heap[0]
self._bubble_down(SCREAMING_SNAKE_CASE__ )
return elem
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Update the weight of the given key
__lowerCamelCase = self.position_map[elem]
__lowerCamelCase = (elem, weight)
if position > 0:
__lowerCamelCase = get_parent_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE__ )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
__lowerCamelCase = self.position_map[elem]
if curr_pos == 0:
return None
__lowerCamelCase = get_parent_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase = self.heap[curr_pos]
__lowerCamelCase , __lowerCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_up(SCREAMING_SNAKE_CASE__ )
return None
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
__lowerCamelCase = self.position_map[elem]
__lowerCamelCase , __lowerCamelCase = self.heap[curr_pos]
__lowerCamelCase = get_child_left_position(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = get_child_right_position(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements and child_right_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_left_position]
__lowerCamelCase , __lowerCamelCase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
if child_left_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
else:
return None
if child_right_position < self.elements:
__lowerCamelCase , __lowerCamelCase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._bubble_down(SCREAMING_SNAKE_CASE__ )
return None
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Swap the nodes at the given positions
__lowerCamelCase = self.heap[nodea_pos][0]
__lowerCamelCase = self.heap[nodea_pos][0]
__lowerCamelCase , __lowerCamelCase = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__lowerCamelCase = nodea_pos
__lowerCamelCase = nodea_pos
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Tuple ) -> None:
__lowerCamelCase = {}
__lowerCamelCase = 0
def __repr__( self : Optional[int] ) -> str:
return str(self.connections )
def __len__( self : List[str] ) -> int:
return self.nodes
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : T ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__lowerCamelCase = {}
self.nodes += 1
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(SCREAMING_SNAKE_CASE__ )
self.add_node(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = weight
__lowerCamelCase = weight
def __magic_name__ ( __lowerCAmelCase : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
__lowerCamelCase = {node: maxsize for node in graph.connections}
__lowerCamelCase = {node: None for node in graph.connections}
__lowerCamelCase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__lowerCAmelCase , __lowerCAmelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
__lowerCamelCase = priority_queue.extract_min()
__lowerCamelCase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowerCAmelCase , dist[neighbour] )
__lowerCamelCase = node
# running prim's algorithm
while not priority_queue.is_empty():
__lowerCamelCase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__lowerCAmelCase , dist[neighbour] )
__lowerCamelCase = node
return dist, parent
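# A small usage sketch on a toy triangle graph (hand-checked values, not from
# the original source): the MST picks edges 1-2 (3) and 2-3 (2), skipping 1-3 (10).
if __name__ == "__main__":
    graph = GraphUndirectedWeighted[int]()
    graph.add_edge(1, 2, 3)
    graph.add_edge(2, 3, 2)
    graph.add_edge(1, 3, 10)
    dist, parent = prims_algo(graph)
    assert parent[3] == 2 and dist[3] == 5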
| 270
| 0
|
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Sum of the factorials of the digits of `n`."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Sum of all numbers equal to the sum of the factorials of their digits (Project Euler 34)."""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F'{solution() = }')
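# Quick sanity check (worked out by hand, not from the original source):
# 1! + 4! + 5! = 1 + 24 + 120 = 145, so 145 is a digit-factorial number.
assert sum_of_digit_factorial(145) == 145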
| 358
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
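    # A minimal usage sketch (synthetic audio and the class name as reconstructed
    # above; not from the original source):
    #
    #     import numpy as np
    #     extractor = TvltFeatureExtractor()
    #     audio = np.random.randn(44100).astype(np.float32)  # one second of noise
    #     inputs = extractor(audio, sampling_rate=44100, return_tensors="np")
    #     print(inputs["audio_values"].shape)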
| 304
| 0
|
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, "torch.device"] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
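    # A minimal denoising-loop sketch (toy shapes and a stand-in model, not from
    # the original source):
    #
    #     scheduler = KDPM2DiscreteScheduler()
    #     scheduler.set_timesteps(10)
    #     sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    #     for t in scheduler.timesteps:
    #         model_output = torch.zeros_like(sample)  # replace with a real UNet call
    #         sample = scheduler.step(model_output, t, sample).prev_sample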
| 44
|
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with the cocktail shaker (bidirectional bubble) sort."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):  # right-to-left pass
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):  # left-to-right pass
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
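# A quick sanity check (hand-picked values, not from the original source):
assert cocktail_shaker_sort([4, 5, 2, 1, 3]) == [1, 2, 3, 4, 5]
assert cocktail_shaker_sort([-4, 0, 2, 1]) == [-4, 0, 1, 2]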
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29
| 0
|
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A pile of cards for patience sort, ordered by its top (last) element."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort `collection` with patience sort: deal into piles, then heap-merge them."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
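# A quick sanity check (hand-picked values, not from the original source):
assert patience_sort([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]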
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
| 353
|
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum=True):
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
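# A small usage sketch (toy values, not from the original source):
if __name__ == "__main__":
    expected = {"http://host/a.txt": {"num_bytes": 3, "checksum": "abc"}}
    recorded = {"http://host/a.txt": {"num_bytes": 3, "checksum": "abc"}}
    verify_checksums(expected, recorded)  # silent when everything matches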
| 57
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 154
|
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Deterministic pre-check against small primes, then fall back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime number of roughly `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 211
| 0
|
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 354
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowercase_ (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
UpperCAmelCase__ = [6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231
# fmt: on
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
def lowercase_ (self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = {"input_ids": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 143
| 0
|
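The test row above manipulates SentencePiece pieces, where a leading "▁" marks the start of a new word. A minimal sketch of how such pieces round-trip back to text (toy pieces chosen for illustration, not the real BigBird vocabulary):

SPIECE_UNDERLINE = "▁"

def detokenize(pieces: list[str]) -> str:
    """Join SentencePiece pieces: '▁' means 'this piece starts a new word'."""
    return "".join(pieces).replace(SPIECE_UNDERLINE, " ").lstrip()

pieces = [SPIECE_UNDERLINE + "This", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "a",
          SPIECE_UNDERLINE + "t", "est"]
assert detokenize(pieces) == "This is a test"   # "t" + "est" fuse into one word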
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node :
def __init__( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
lowercase__ = data
lowercase__ = None
class CircularLinkedList :
def __init__( self ) -> Any:
'''simple docstring'''
lowercase__ = None
lowercase__ = None
def __iter__( self ) -> Iterator[Any]:
'''simple docstring'''
        node = self.head
        while self.head:
            yield node.data
            node = node.next
if node == self.head:
break
def __len__( self ) -> int:
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ) -> Optional[Any]:
'''simple docstring'''
        return "->".join(str(item ) for item in iter(self ) )
    def insert_tail( self , data ) -> None:
        '''simple docstring'''
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ) -> None:
        '''simple docstring'''
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ) -> None:
        '''simple docstring'''
        if index < 0 or index > len(self ):
            raise IndexError("""list index out of range.""" )
        new_node = Node(data )
        if self.head is None:
            new_node.next = new_node # first node points itself
            self.head = self.tail = new_node
        elif index == 0: # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self ) - 1: # insert at tail
                self.tail = new_node
    def delete_front( self ) -> Dict:
'''simple docstring'''
return self.delete_nth(0 )
    def delete_tail( self ) -> Any:
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ) -> Any:
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError("""list index out of range.""" )
        delete_node = self.head
        if self.head == self.tail: # just one node
            self.head = self.tail = None
        elif index == 0: # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self ) - 1: # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty( self ) -> bool:
'''simple docstring'''
return len(self ) == 0
def _A ( ):
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A ( PipelineTesterMixin , unittest.TestCase ):
lowerCamelCase : List[str] = AudioLDMPipeline
lowerCamelCase : Union[str, Any] = TEXT_TO_AUDIO_PARAMS
lowerCamelCase : Tuple = TEXT_TO_AUDIO_BATCH_PARAMS
lowerCamelCase : Optional[int] = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowerCamelCase__ , )
lowercase__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
lowercase__ = ClapTextModelWithProjection(lowerCamelCase__ )
lowercase__ = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
lowercase__ = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowerCamelCase__ , )
lowercase__ = SpeechTaHifiGan(lowerCamelCase__ )
lowercase__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
    def get_dummy_inputs( self , device , seed=0 ) -> Tuple:
'''simple docstring'''
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
lowercase__ = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase__ )
lowercase__ = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = audioldm_pipe(**lowerCamelCase__ )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase__ ) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase__ )
lowercase__ = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = 3 * [inputs["""prompt"""]]
# forward
        output_1 = audioldm_pipe(**inputs )
        audio_1 = output_1.audios[0]
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = 3 * [inputs.pop("""prompt""" )]
lowercase__ = audioldm_pipe.tokenizer(
lowerCamelCase__ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCamelCase__ , return_tensors="""pt""" , )
lowercase__ = text_inputs["""input_ids"""].to(lowerCamelCase__ )
lowercase__ = audioldm_pipe.text_encoder(
lowerCamelCase__ , )
lowercase__ = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowercase__ = F.normalize(lowerCamelCase__ , dim=-1 )
        inputs["""prompt_embeds"""] = prompt_embeds
# forward
        output_2 = audioldm_pipe(**inputs )
        audio_2 = output_2.audios[0]
        assert np.abs(audio_1 - audio_2 ).max() < 1e-2
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase__ )
lowercase__ = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = 3 * ["""this is a negative prompt"""]
        inputs["""negative_prompt"""] = negative_prompt
lowercase__ = 3 * [inputs["""prompt"""]]
# forward
        output_1 = audioldm_pipe(**inputs )
        audio_1 = output_1.audios[0]
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = 3 * [inputs.pop("""prompt""" )]
lowercase__ = []
for p in [prompt, negative_prompt]:
lowercase__ = audioldm_pipe.tokenizer(
lowerCamelCase__ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCamelCase__ , return_tensors="""pt""" , )
lowercase__ = text_inputs["""input_ids"""].to(lowerCamelCase__ )
lowercase__ = audioldm_pipe.text_encoder(
lowerCamelCase__ , )
lowercase__ = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowercase__ = F.normalize(lowerCamelCase__ , dim=-1 )
embeds.append(lowerCamelCase__ )
        prompt_embeds , negative_prompt_embeds = embeds
        inputs["""prompt_embeds"""] = prompt_embeds
        inputs["""negative_prompt_embeds"""] = negative_prompt_embeds
        # forward
        output_2 = audioldm_pipe(**inputs )
        audio_2 = output_2.audios[0]
        assert np.abs(audio_1 - audio_2 ).max() < 1e-2
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
lowercase__ = AudioLDMPipeline(**lowerCamelCase__ )
lowercase__ = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = """egg cracking"""
lowercase__ = audioldm_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase__ ) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def A__ ( self ) -> int:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
lowercase__ = AudioLDMPipeline(**lowerCamelCase__ )
lowercase__ = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        prompt = """A hammer hitting a wooden surface"""
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt , num_inference_steps=2 ).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt , num_inference_steps=2 , num_waveforms_per_prompt=num_waveforms_per_prompt ).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=num_waveforms_per_prompt ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase__ )
lowercase__ = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = audioldm_pipe(audio_length_in_s=0.0_16 , **lowerCamelCase__ )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase__ ) / vocoder_sampling_rate == 0.0_16
lowercase__ = audioldm_pipe(audio_length_in_s=0.0_32 , **lowerCamelCase__ )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase__ ) / vocoder_sampling_rate == 0.0_32
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase__ )
lowercase__ = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        prompt = ["""hey"""]
        output = audioldm_pipe(prompt , num_inference_steps=1 )
        audio_shape = output.audios.shape
assert audio_shape == (1, 256)
lowercase__ = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechTaHifiGan(config ).to(torch_device )
        output = audioldm_pipe(prompt , num_inference_steps=1 )
        audio_shape = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__ )
def A__ ( self ) -> int:
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCamelCase__ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def A__ ( self ) -> Any:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ )
@slow
class A ( unittest.TestCase ):
    def tearDown( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.floataa , seed=0 ) -> int:
'''simple docstring'''
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 8, 128, 16) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
lowercase__ = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
lowercase__ = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_inputs(lowerCamelCase__ )
        inputs["""num_inference_steps"""] = 25
lowercase__ = audioldm_pipe(**lowerCamelCase__ ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase__ ) == 81_920
        audio_slice = audio[77_230:77_240]
        expected_slice = np.array(
            [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] )
        max_diff = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__ = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
lowercase__ = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowercase__ = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_inputs(lowerCamelCase__ )
lowercase__ = audioldm_pipe(**lowerCamelCase__ ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase__ ) == 81_920
        audio_slice = audio[27_780:27_790]
        expected_slice = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] )
        max_diff = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 164
| 1
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase_ (DiffusionPipeline ):
"""simple docstring"""
    def __init__( self ,speech_model : WhisperForConditionalGeneration ,speech_processor : WhisperProcessor ,vae : AutoencoderKL ,text_encoder : CLIPTextModel ,tokenizer : CLIPTokenizer ,unet : UNetaDConditionModel ,scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,safety_checker : StableDiffusionSafetyChecker ,feature_extractor : CLIPImageProcessor ,):
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            speech_model=speech_model ,speech_processor=speech_processor ,vae=vae ,text_encoder=text_encoder ,tokenizer=tokenizer ,unet=unet ,scheduler=scheduler ,feature_extractor=feature_extractor ,)
    def enable_attention_slicing( self ,slice_size : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase__ )
    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self ,audio ,sampling_rate=1_6_0_0_0 ,height : int = 5_1_2 ,width : int = 5_1_2 ,num_inference_steps : int = 5_0 ,guidance_scale : float = 7.5 ,negative_prompt : Optional[Union[str, List[str]]] = None ,num_images_per_prompt : Optional[int] = 1 ,eta : float = 0.0 ,generator : Optional[torch.Generator] = None ,latents : Optional[torch.FloatTensor] = None ,output_type : Optional[str] = "pil" ,return_dict : bool = True ,callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,callback_steps : int = 1 ,**kwargs ,):
        inputs = self.speech_processor.feature_extractor(
            audio ,return_tensors='''pt''' ,sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs ,max_length=4_8_0_0_0_0 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids ,skip_special_tokens=True ,normalize=True )[
            0
        ]
        if isinstance(prompt ,str ):
            batch_size = 1
        elif isinstance(prompt ,list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(prompt )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps ,int ) or callback_steps <= 0)
        ):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowerCamelCase__ )}." )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt ,padding='''max_length''' ,max_length=self.tokenizer.model_max_length ,return_tensors='''pt''' ,)
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 ,num_images_per_prompt ,1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt ,seq_len ,-1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
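        # In symbols (the standard classifier-free guidance formulation, assumed
        # here rather than derived in this file):
        #     noise = eps_uncond + guidance_scale * (eps_text - eps_uncond)
        # so guidance_scale = 1 keeps only the conditional prediction, and larger
        # values push the sample further toward the text condition.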
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens : List[str]
            if negative_prompt is None:
                uncond_tokens = [''''''] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    F"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="
                    F" {type(prompt )}." )
            elif isinstance(negative_prompt ,str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    F"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"
                    F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    ''' the batch size of `prompt`.''' )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens ,padding='''max_length''' ,max_length=max_length ,truncation=True ,return_tensors='''pt''' ,)
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1 ,num_images_per_prompt ,1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt ,seq_len ,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape ,generator=generator ,device='''cpu''' ,dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape ,generator=generator ,device=self.device ,dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            latents = latents.to(self.device )
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['''eta'''] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
# expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input ,t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input ,t ,encoder_hidden_states=text_embeddings ).sample
# perform guidance
if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred ,t ,latents ,**extra_step_kwargs ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
                callback(i ,t ,latents )
        latents = 1 / 0.1_8_2_1_5 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 ,1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
            image = self.numpy_to_pil(image )
if not return_dict:
return image
        return StableDiffusionPipelineOutput(images=image ,nsfw_content_detected=None )
| 357
|
'''simple docstring'''
import string
def decrypt( message ):
"""simple docstring"""
    for key in range(len(string.ascii_uppercase ) ):
        translated = ''''''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}" )
def main( ):
"""simple docstring"""
    message = input('''Encrypted message: ''' )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 52
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a__ ( TokenizerTesterMixin , unittest.TestCase ):
lowerCamelCase : Optional[int] =XLMTokenizer
lowerCamelCase : List[Any] =False
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
"""simple docstring"""
        input_text = '''lower newer'''
        output_text = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
        tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
        tokenizer = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 67
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester( unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_reduce_labels=False , ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''height''': 20, '''width''': 20}
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Optional[Any] = image_size
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : Optional[Any] = do_center_crop
UpperCAmelCase_ : Optional[int] = crop_size
UpperCAmelCase_ : Tuple = do_normalize
UpperCAmelCase_ : Optional[Any] = image_mean
UpperCAmelCase_ : int = image_std
UpperCAmelCase_ : List[Any] = do_reduce_labels
    def prepare_image_processor_dict( self ) -> str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs() -> Optional[Any]:
    dataset = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
    image = Image.open(dataset[0]['''file'''] )
    map = Image.open(dataset[1]['''file'''] )
    return image, map
def prepare_semantic_batch_inputs() -> Any:
    ds = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
    image_1 = Image.open(ds[0]['''file'''] )
    image_2 = Image.open(ds[1]['''file'''] )
    map_1 = Image.open(ds[2]['''file'''] )
    map_2 = Image.open(ds[3]['''file'''] )
    return [image_1, image_2], [map_1, map_2]
@require_torch
@require_vision
class snake_case__ ( ImageProcessingSavingTestMixin , unittest.TestCase):
a_ = BeitImageProcessor if is_vision_available() else None
def A ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = BeitImageProcessingTester(self )
@property
def A ( self : List[Any] ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[Any] ) -> Optional[Any]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
def A ( self : List[str] ) -> Optional[int]:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        self.assertEqual(image_processor.do_reduce_labels , False )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=True )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
        self.assertEqual(image_processor.do_reduce_labels , True )
def A ( self : Optional[Any] ) -> Any:
pass
def A ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Union[str, Any] ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Optional[int] ) -> str:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Any ) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
UpperCAmelCase_ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
UpperCAmelCase_ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
UpperCAmelCase_ : List[str] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_semantic_batch_inputs()
UpperCAmelCase_ : int = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
def A ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
UpperCAmelCase_ : int = True
UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
| 304
| 0
|
from __future__ import annotations
from typing import Any
def evaluate_postfix( postfix_notation : list ):
"""simple docstring"""
if not postfix_notation:
return 0
    operations = {'''+''', '''-''', '''*''', '''/'''}
    stack : list[Any] = []
for token in postfix_notation:
if token in operations:
            b , a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
            stack.append(int(token ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315
|
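The row above evaluates postfix (reverse Polish) expressions; its division branch emulates C-style truncation toward zero, which Python's floor division does not do for mixed signs. A compact readable sketch of the same evaluator under that assumption:

def eval_postfix(tokens: list[str]) -> int:
    """Evaluate a postfix expression of ints with +, -, *, / (truncating division)."""
    stack: list[int] = []
    for token in tokens:
        if token in {"+", "-", "*", "/"}:
            b, a = stack.pop(), stack.pop()      # b was pushed last
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Python floors toward -inf; add 1 back when the signs differ
                # and the division is inexact, so the result truncates toward zero.
                stack.append(a // b + 1 if a * b < 0 and a % b != 0 else a // b)
        else:
            stack.append(int(token))
    return stack.pop()

assert eval_postfix("2 1 + 3 *".split()) == 9     # (2 + 1) * 3
assert eval_postfix("-7 2 /".split()) == -3       # truncates toward zero, not -4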
def get_data( source_data : list[list[float]] ):
    """simple docstring"""
    data_lists : list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
return data_lists
def calculate_each_score( data_lists : list[list[float]] , weights : list[int] ):
    """simple docstring"""
    score_lists : list[list[float]] = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
            msg = F"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg )
        score_lists.append(score )
return score_lists
def generate_final_scores( score_lists : list[list[float]] ):
    """simple docstring"""
    final_scores : list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
return final_scores
def procentual_proximity( source_data : list[list[float]] , weights : list[int] ):
    """simple docstring"""
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
return source_data
| 315
| 1
|
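The two rows above score each column by min-max normalization: score = (x − min) / (max − min) when higher is better (weight 1), and its complement when lower is better (weight 0). A small self-contained sketch of that scoring rule (function and variable names are illustrative):

def minmax_scores(column: list[float], weight: int) -> list[float]:
    """weight 1: higher is better; weight 0: lower is better (score inverted)."""
    lo, hi = min(column), max(column)
    span = hi - lo
    scores = []
    for x in column:
        s = (x - lo) / span if span else 0.0     # constant column: score 0
        scores.append(1 - s if weight == 0 else s)
    return scores

prices = [20.0, 30.0, 40.0]
assert minmax_scores(prices, weight=0) == [1.0, 0.5, 0.0]   # cheapest scores best
assert minmax_scores(prices, weight=1) == [0.0, 0.5, 1.0]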
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] =parent
_lowerCamelCase : str =batch_size
_lowerCamelCase : Optional[Any] =image_size
_lowerCamelCase : Any =patch_size
_lowerCamelCase : Dict =num_channels
_lowerCamelCase : List[str] =is_training
_lowerCamelCase : int =use_labels
_lowerCamelCase : Optional[int] =hidden_size
_lowerCamelCase : Any =num_hidden_layers
_lowerCamelCase : str =num_attention_heads
_lowerCamelCase : Any =intermediate_size
_lowerCamelCase : Dict =hidden_act
_lowerCamelCase : int =hidden_dropout_prob
_lowerCamelCase : List[str] =attention_probs_dropout_prob
_lowerCamelCase : Optional[int] =type_sequence_label_size
_lowerCamelCase : List[Any] =initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCamelCase : Optional[Any] =(image_size // patch_size) ** 2
_lowerCamelCase : str =num_patches + 1
    def prepare_config_and_inputs( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase : Optional[int] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] =ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , )
return config, pixel_values
    def create_and_check_model( self , config , pixel_values ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =FlaxViTModel(config=__a )
_lowerCamelCase : str =model(__a )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_lowerCamelCase : Optional[int] =(self.image_size, self.image_size)
_lowerCamelCase : List[str] =(self.patch_size, self.patch_size)
_lowerCamelCase : List[str] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] =self.type_sequence_label_size
_lowerCamelCase : List[Any] =FlaxViTForImageClassification(config=__a )
_lowerCamelCase : Tuple =model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : Union[str, Any] =1
_lowerCamelCase : str =FlaxViTForImageClassification(__a )
_lowerCamelCase : Optional[Any] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Dict =model(__a )
    def prepare_config_and_inputs_for_common( self ) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class A ( FlaxModelTesterMixin , unittest.TestCase ):
UpperCamelCase__ : int =(FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def lowerCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[int] =FlaxViTModelTester(self )
_lowerCamelCase : Union[str, Any] =ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def lowerCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_lowerCamelCase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict =model_class(__a )
_lowerCamelCase : Dict =inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] =[*signature.parameters.keys()]
_lowerCamelCase : str =['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def lowerCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase : Dict =self._prepare_for_class(__a , __a )
_lowerCamelCase : Optional[int] =model_class(__a )
@jax.jit
def model_jitted(lowercase_ : Optional[int] , **lowercase_ : List[str] ):
return model(pixel_values=__a , **__a )
with self.subTest('JIT Enabled' ):
_lowerCamelCase : int =model_jitted(**__a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowerCamelCase : Optional[Any] =model_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_lowerCamelCase : Optional[Any] =model_class_name.from_pretrained('google/vit-base-patch16-224' )
_lowerCamelCase : str =model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__a )
| 199
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( ProcessorMixin ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""image_processor""", """tokenizer"""]
__UpperCAmelCase : Optional[Any] ="""CLIPImageProcessor"""
__UpperCAmelCase : Union[str, Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
__lowerCAmelCase = self.tokenizer.model_input_names
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 57
| 0
|
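The processor row above simply fans one call out to a tokenizer and an image processor and merges the two outputs into a single encoding. A toy stand-in that mirrors that routing without any downloaded models (class and stub names are illustrative, not part of the library):

class MiniProcessor:
    """Route text to a tokenizer, images to an image processor, merge results."""

    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        encoding = self.tokenizer(text) if text is not None else {}
        if images is not None:
            encoding["pixel_values"] = self.image_processor(images)
        return encoding

# Stub callables so the sketch runs standalone.
proc = MiniProcessor(
    tokenizer=lambda t: {"input_ids": [[101, 7_592, 102]]},   # dummy ids
    image_processor=lambda im: [[0.0] * 4],                   # dummy pixels
)
out = proc(text=["hello"], images=["img"])
assert set(out) == {"input_ids", "pixel_values"}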
"""simple docstring"""
class a_ :
'''simple docstring'''
    def __init__(self, array ):
        '''simple docstring'''
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self, start, end ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum(self, target_sum ):
        '''simple docstring'''
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
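    # Hedged usage sketch (illustrative, not part of the original file):
    demo = PrefixSum([1, 2, 3, 4, 5])
    print(demo.get_sum(1, 3))  # 9: sum of the elements at indices 1..3
    print(demo.contains_sum(9))  # True: the subarray [2, 3, 4] sums to 9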
| 316
|
"""simple docstring"""
import cv2 as cva  # alias keeps the `cva.` call sites below unchanged
import numpy as np
class HarrisCorner :
'''simple docstring'''
    def __init__(self, k: float, window_size: int ):
        '''simple docstring'''
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
'''simple docstring'''
return str(self.k )
    def detect(self, img_path: str ):
        '''simple docstring'''
        img = cva.imread(img_path, 0 )
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB )
        dy, dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # Harris free parameter chosen at construction time
        offset = self.window_size // 2
        for y in range(offset, h - offset ):
            for x in range(offset, w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                # Harris response: det(M) - k * trace(M)**2 of the local structure tensor M
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
| 316
| 1
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : str , _A : List[str] ):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'sshleifer/tiny-gpt2'
__SCREAMING_SNAKE_CASE : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
__SCREAMING_SNAKE_CASE : Optional[Any] = PyTorchBenchmark(__UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = 'sgugger/tiny-distilbert-classification'
__SCREAMING_SNAKE_CASE : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , only_pretrain_model=__UpperCamelCase , )
__SCREAMING_SNAKE_CASE : Dict = PyTorchBenchmark(__UpperCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = 'sshleifer/tiny-gpt2'
__SCREAMING_SNAKE_CASE : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , torchscript=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
__SCREAMING_SNAKE_CASE : Optional[Any] = PyTorchBenchmark(__UpperCamelCase )
__SCREAMING_SNAKE_CASE : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = 'sshleifer/tiny-gpt2'
__SCREAMING_SNAKE_CASE : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , fpaa=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
__SCREAMING_SNAKE_CASE : Optional[int] = PyTorchBenchmark(__UpperCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'sshleifer/tiny-gpt2'
__SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
# set architectures equal to `None`
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
__SCREAMING_SNAKE_CASE : List[Any] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
__SCREAMING_SNAKE_CASE : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = 'sshleifer/tiny-gpt2'
__SCREAMING_SNAKE_CASE : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
__SCREAMING_SNAKE_CASE : str = PyTorchBenchmark(__UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'sshleifer/tiny-gpt2'
__SCREAMING_SNAKE_CASE : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__UpperCamelCase , multi_process=__UpperCamelCase , )
__SCREAMING_SNAKE_CASE : List[Any] = PyTorchBenchmark(__UpperCamelCase )
__SCREAMING_SNAKE_CASE : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'sshleifer/tiny-gpt2'
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(__UpperCamelCase )
__SCREAMING_SNAKE_CASE : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
__SCREAMING_SNAKE_CASE : List[str] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
__SCREAMING_SNAKE_CASE : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'sshleifer/tinier_bart'
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(__UpperCamelCase )
__SCREAMING_SNAKE_CASE : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
__SCREAMING_SNAKE_CASE : int = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
__SCREAMING_SNAKE_CASE : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = 'sshleifer/tiny-gpt2'
__SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
__SCREAMING_SNAKE_CASE : int = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
__SCREAMING_SNAKE_CASE : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = 'sshleifer/tinier_bart'
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(__UpperCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
__SCREAMING_SNAKE_CASE : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , save_to_csv=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCamelCase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(__UpperCamelCase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(__UpperCamelCase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(__UpperCamelCase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(__UpperCamelCase , '''env.csv''' ) , multi_process=__UpperCamelCase , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = PyTorchBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''env.csv''' ) ).exists() )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(_A : List[str] ):
self.assertTrue(hasattr(__UpperCamelCase , '''sequential''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''cumulative''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''current''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCamelCase , '''log.txt''' ) , log_print=__UpperCamelCase , trace_memory_line_by_line=__UpperCamelCase , multi_process=__UpperCamelCase , )
__SCREAMING_SNAKE_CASE : int = PyTorchBenchmark(__UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''log.txt''' ) ).exists() )
| 303
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __snake_case ( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand( parser ) -> None:
        '''simple docstring'''
        raise NotImplementedError()
    @abstractmethod
    def run( self ) -> None:
        '''simple docstring'''
        raise NotImplementedError()
| 143
| 0
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
# aliases keep the obfuscated class names used throughout this script importable
from transformers import (
    AutoTokenizer,
    Blip2Config as BlipaConfig,
    Blip2ForConditionalGeneration as BlipaForConditionalGeneration,
    Blip2Processor as BlipaProcessor,
    Blip2VisionConfig as BlipaVisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config as TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image( ):
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url ,stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys( config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key( dct ,old ,new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict ,config ) -> None:
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
        # next, set bias in the state dict: the packed qkv projection expects a
        # single bias of the form (q_bias, zeros-for-k, v_bias)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias ,requires_grad=False ), v_bias) )
        state_dict[f"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
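# Hedged shape note (illustrative, not from the original script): for vision
# hidden size H, q_bias and v_bias each have shape (H,), so the fused qkv bias
# built above has shape (3 * H,), matching the packed qkv projection weight.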
def get_blipa_config( model_name ,eos_token_id=None ):
    image_size = 364 if """coco""" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" ,eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" ,eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" ,dense_act_fn="""gelu""" ,bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" ,dense_act_fn="""gelu""" ,bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config ,text_config=text_config )
return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name ,pytorch_dump_folder_path=None ,push_to_hub=False ):
    tokenizer = (
        AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
        if """opt""" in model_name
        else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
    )
    eos_token_id = tokenizer("""\n""" ,add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name ,eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
        """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
        """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
        """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
        """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
        """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
        """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
    }
    name, model_type = model_name_to_original[model_name]
    # load original model
    print("""Loading original model...""" )
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name ,model_type=model_type ,is_eval=True ,device=device )
    original_model.eval()
    print("""Done!""" )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    # some keys can be renamed efficiently with string replacement
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""Qformer.bert""" ):
            key = key.replace("""Qformer.bert""" ,"""qformer""" )
        if "attention.self" in key:
            key = key.replace("""self""" ,"""attention""" )
        if "opt_proj" in key:
            key = key.replace("""opt_proj""" ,"""language_projection""" )
        if "t5_proj" in key:
            key = key.replace("""t5_proj""" ,"""language_projection""" )
        if key.startswith("""opt""" ):
            key = key.replace("""opt""" ,"""language""" )
        if key.startswith("""t5""" ):
            key = key.replace("""t5""" ,"""language""" )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict ,config )
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict ,strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["""eval"""](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(["""\n"""] ,return_tensors="""pt""" ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={"""height""": image_size, """width""": image_size} ,image_mean=OPENAI_CLIP_MEAN ,image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor ,tokenizer=tokenizer )
    pixel_values = processor(images=image ,return_tensors="""pt""" ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values ,original_pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
            logits = hf_model(pixel_values ,input_ids ).logits
        else:
            original_logits = original_model(
                {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id ,-100 )
            logits = hf_model(pixel_values ,input_ids ,labels=labels ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" ,original_logits[0, :3, :3] )
print("""First values of HF logits:""" ,logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] ,device=device )
        assert torch.allclose(logits[0, :3, :3] ,expected_slice_logits ,atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] ,device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) ,logits ,atol=1E-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
snake_case : Union[str, Any] = """"""
snake_case : Dict = tokenizer(lowercase ,return_tensors="""pt""" ).input_ids.to(lowercase )
snake_case : Optional[int] = original_model.generate({"""image""": original_pixel_values} )
snake_case : int = hf_model.generate(
lowercase ,lowercase ,do_sample=lowercase ,num_beams=5 ,max_length=30 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.0 ,length_penalty=1.0 ,temperature=1 ,)
print("""Original generation:""" ,lowercase )
snake_case : Dict = input_ids.shape[1]
snake_case : List[Any] = processor.batch_decode(outputs[:, prompt_length:] ,skip_special_tokens=lowercase )
snake_case : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" ,lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase )
hf_model.save_pretrained(lowercase )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 176
|
from collections import defaultdict
def check_anagrams( first_str: str ,second_str: str ) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(""" """ ,"""""" )
    second_str = second_str.replace(""" """ ,"""""" )
# Strings of different lengths are not anagrams
    if len(first_str ) != len(second_str ):
return False
# Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int )
    # For each character in the input strings,
    # increment the count in the corresponding counter entry
    for i in range(len(first_str ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
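    # Hedged sanity checks (illustrative additions, alongside the interactive
    # prompt below):
    assert check_anagrams("Silent", "Listen")
    assert check_anagrams("New York Times", "monkeys write")
    assert not check_anagrams("hello", "world")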
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()
    status = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 176
| 1
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = '''src/diffusers'''
REPO_PATH = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    '''diffusers''',
    os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue( line , indent ):
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , line ) is not None
def find_code_in_diffusers( object_name ):
    parts = object_name.split('''.''' )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f'{module}.py' ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
    with open(os.path.join(DIFFUSERS_PATH , f'{module}.py' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ''''''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(Rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f' {object_name} does not match any function or class in {module}.' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_re_replace_pattern = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_re_fill_pattern = re.compile(r'''<FILL\s+[^>]*>''')
def get_indent( code ):
    lines = code.split('''\n''' )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
    return ""
def blackify( code ):
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f'class Bla:\n{code}'
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len('''class Bla:\n''' ) :] if has_indent else result
def is_copy_consistent( filename , overwrite=False ):
    with open(filename , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(f'^{indent}# End copy' , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = ''''''.join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(line ) is None]
        theoretical_code = '''\n'''.join(theoretical_code_lines )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja , objb , option = pattern.groups()
                theoretical_code = re.sub(obja , objb , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower() , objb.lower() , theoretical_code )
                    theoretical_code = re.sub(obja.upper() , objb.upper() , theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f'Detected changes, rewriting {filename}.' )
        with open(filename , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
            f.writelines(lines )
    return diffs
def check_copies( overwrite = False ):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , '''**/*.py''' ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = '''\n'''.join(diffs )
        raise Exception(
            '''Found the following copy inconsistencies:\n'''
            + diff
            + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 49
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : Any = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[str] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 52
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
_lowerCAmelCase : Dict = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class UpperCAmelCase_ ( PipelineTool ):
    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']
    def encode( self , text , src_lang , tgt_lang ):
        if src_lang not in self.lang_to_code:
            raise ValueError(f'{src_lang} is not a supported language.' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'{tgt_lang} is not a supported language.' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors="pt" , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward( self , inputs ):
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
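# Hedged usage sketch (illustrative; the tool class above keeps its obfuscated
# name `UpperCAmelCase_`, and `setup()`/`__call__` come from the PipelineTool
# base class):
#     tool = UpperCAmelCase_()
#     print(tool("Comment allez-vous ?", src_lang="French", tgt_lang="English"))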
| 202
|
"""simple docstring"""
def merge_sort( collection: list ) -> list:
    '''simple docstring'''
    start , end = [], []
    while len(collection ) > 1:
        min_one , max_one = min(collection ), max(collection )
        start.append(min_one )
        end.append(max_one )
        collection.remove(min_one )
        collection.remove(max_one )
end.reverse()
return start + collection + end
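# Hedged note (not from the original file): despite the `merge_sort` name used
# at the call site below, this is a min/max selection scheme; each pass scans
# the remaining items for min() and max(), so the overall running time is
# O(n^2) rather than merge sort's O(n log n).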
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
| 202
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = '''ibert'''
def __init__( self : Dict , _UpperCAmelCase : List[str]=30_522 , _UpperCAmelCase : List[Any]=768 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : str=3_072 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : str=1E-1_2 , _UpperCAmelCase : Optional[int]=1 , _UpperCAmelCase : Tuple=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Dict="absolute" , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Optional[Any]="none" , **_UpperCAmelCase : List[Any] , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = position_embedding_type
_A = quant_mode
_A = force_dequant
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : int ):
if self.task == "multiple-choice":
_A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 315
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
a = logging.get_logger(__name__)
class lowercase_ ( ImageGPTImageProcessor ):
    '''simple docstring'''
    def __init__( self : Any , *args , **kwargs ):
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 315
| 1
|
from collections.abc import Callable
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
    def __init__( self: Any , key: Callable | None = None ) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x : x)
    def _parent( self: List[Any] , i: int ) -> int | None:
        return int((i - 1) / 2 ) if i > 0 else None
    def _left( self: Union[str, Any] , i: int ) -> int | None:
        left = int(2 * i + 1 )
        return left if 0 < left < self.size else None
    def _right( self: Optional[Any] , i: int ) -> int | None:
        right = int(2 * i + 2 )
        return right if 0 < right < self.size else None
    def _swap( self: List[str] , i: int , j: int ) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]] , self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i] , self.arr[j] = self.arr[j], self.arr[i]
    def _cmp( self: Dict , i: int , j: int ) -> bool:
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent( self: int , i: int ) -> int:
        left = self._left(i )
        right = self._right(i )
        valid_parent = i
        if left is not None and not self._cmp(left , valid_parent ):
            valid_parent = left
        if right is not None and not self._cmp(right , valid_parent ):
            valid_parent = right
        return valid_parent
    def _heapify_up( self: List[str] , index: int ) -> None:
        parent = self._parent(index )
        while parent is not None and not self._cmp(index , parent ):
            self._swap(index , parent )
            index , parent = parent, self._parent(parent )
    def _heapify_down( self: str , index: int ) -> None:
        valid_parent = self._get_valid_parent(index )
        while valid_parent != index:
            self._swap(index , valid_parent )
            index , valid_parent = valid_parent, self._get_valid_parent(valid_parent )
    def update_item( self: Any , item: int , item_value: int ) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value )]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index )
        self._heapify_down(index )
    def delete_item( self: Any , item: int ) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last item into the vacated slot, then restore heap order.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index )
            self._heapify_down(index )
    def insert_item( self: Tuple , item: int , item_value: int ) -> None:
        arr_len = len(self.arr )
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value )] )
        else:
            self.arr[self.size] = [item, self.key(item_value )]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1 )
    def get_top( self: int ) -> tuple | None:
        return self.arr[0] if self.size else None
    def extract_top( self: List[Any] ) -> tuple | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0] )
        return top_item_tuple
def __A ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
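    # Hedged usage sketch (illustrative; the heap class above keeps its
    # obfuscated name `SCREAMING_SNAKE_CASE`):
    heap = SCREAMING_SNAKE_CASE()
    heap.insert_item(5, 34)
    heap.insert_item(6, 31)
    heap.insert_item(7, 37)
    print(heap.get_top())      # [6, 31]: the smallest value sits at the top
    print(heap.extract_top())  # removes [6, 31]; [5, 34] becomes the new top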
| 75
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ logging before the imports below
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 75
| 1
|
"""simple docstring"""
def solution( n = 1_000 ) -> int:
"""simple docstring"""
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 243
|
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def _snake_case ( _snake_case : int ) -> List[Any]:
'''simple docstring'''
_A = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=_snake_case , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=_snake_case , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=_snake_case , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=_snake_case , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=_snake_case , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=_snake_case , type=_snake_case , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=_snake_case , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def _snake_case ( _snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
if args.calibrator == "max":
_A = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
_A = 'histogram'
elif args.calibrator == "mse":
_A = 'histogram'
else:
raise ValueError(F'''Invalid calibrator {args.calibrator}''' )
_A = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case )
_A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case )
quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case )
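    # Hedged note (not from the original script): axis=(0,) above selects
    # per-output-channel weight scaling, while the --quant-per-tensor path
    # (axis=None) uses a single scale factor for the whole weight tensor.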
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Union[str, Any]=False ) -> Optional[int]:
'''simple docstring'''
logger.info('Configuring Model for Quantization' )
logger.info(F'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_snake_case , ['embeddings'] , which='weight' , _disabled=_snake_case )
if args.quant_disable:
set_quantizer_by_name(_snake_case , [''] , _disabled=_snake_case )
if args.quant_disable_keyword:
set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case )
if args.quant_disable_layer_module:
set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=_snake_case )
if args.quant_enable_layer_module:
set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_snake_case )
if args.recalibrate_weights:
recalibrate_weights(_snake_case )
if args.fuse_qkv:
fuse_qkv(_snake_case , _snake_case )
if args.clip_gelu:
clip_gelu(_snake_case , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_snake_case )
def _snake_case ( _snake_case : str ) -> Any:
'''simple docstring'''
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'''{name:80}: {module}''' )
def finish_calibration(model, args):
    """Disable calibration and load the calibrated amax values for all *_quantizer modules."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
            module.enable_quant()
            module.disable_calib()
        else:
            module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Force the Q, K and V quantizers of each self-attention block to share one scale factor."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip the GELU output maximum value to `maxval` by clamping the input amax of the following dense layer."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand a per-tensor amax to per-channel, assigning every channel the per-tensor value."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Perform max calibration on the weights and update their amax buffers."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print a per-layer summary of activation and weight quantizer configurations."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))
    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')
def print_quant_summary(model):
    """Print the number (and repr) of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute `k` to value `v` on `mod`'s quantizer submodule."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which="both", **kwargs):
    """Set attributes on the input and/or weight quantizers of `mod`."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for all layers whose name matches a regex in `names`."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
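

# A minimal usage sketch (the `model` is assumed to be built with pytorch-quantization's
# quant_nn layers; the regexes and flags are illustrative, not from the original script):
#
#   set_quantizer_by_name(model, [""], _disabled=True)                       # disable everything
#   set_quantizer_by_name(model, [r"layer.\d+.attention"], _disabled=False)  # re-enable attention
#   print_quant_summary(model)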
| 315
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    """Load the COCO test image that the model predictions are verified on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    """Return the expected first five logits for each checkpoint."""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Copy/paste/tweak the original model weights into the HuggingFace SwiftFormer structure."""
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
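
# Example invocation (the script file name and checkpoint path are assumptions,
# not taken from the script itself):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth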
| 355
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
SCREAMING_SNAKE_CASE_:Any = """src/diffusers"""
# Matches is_xxx_available()
_re_backend = re.compile(R"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
DUMMY_CONSTANT = """
{0} = None
"""
DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the main __init__ and extract the backend-specific objects."""
    with open(os.path.join(PATH, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code of the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
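
# For illustration, create_dummy_object("UNet2DModel", '["torch"]') expands the
# DUMMY_CLASS template above to roughly:
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])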
| 115
| 0
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f'Supported model types: {",".join(self.backbones_supported)}'
            )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f' {",".join(self.decoders_supported)}'
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate a MaskFormerConfig from a backbone and a decoder configuration."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
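
# A short usage sketch — with no arguments the config falls back to the Swin
# backbone and DETR decoder defaults wired in above:
#
#   config = MaskFormerConfig()
#   assert config.backbone_config.model_type == "swin"
#   assert config.decoder_config.model_type == "detr"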
| 176
|
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
SCREAMING_SNAKE_CASE__ = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
| 176
| 1
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    """Configuration for the multilingual CLIP text encoder."""

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class MultilingualCLIP(PreTrainedModel):
    """XLM-R text encoder followed by a linear projection into the CLIP image embedding space."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token embeddings, masking out padding positions
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
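
# A minimal smoke test (all values illustrative; transformerDimSize must match the
# text model's hidden size, 768 by default, for the projection to line up):
#
#   cfg = MCLIPConfig(transformerDimSize=768, imageDimSize=512)
#   model = MultilingualCLIP(cfg)
#   ids = torch.ones((1, 4), dtype=torch.long)
#   mask = torch.ones((1, 4), dtype=torch.long)
#   projected, token_embs = model(ids, mask)   # projected: (1, 512)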
| 356
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
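
# gelu_10 behaves like gelu but clamps the output at 10.0, e.g. (illustrative):
#
#   act = get_activation("gelu_10")
#   act(torch.tensor([0.0, 100.0]))  # -> tensor([ 0., 10.])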
| 292
| 0
|
"""simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    """Min-heap priority queue that supports updating an item's priority."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
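
# A quick illustration of the updatable queue above (values are arbitrary):
#
#   pq = PriorityQueue()
#   pq.put((0, 0), 5.0)
#   pq.put((1, 1), 2.0)
#   pq.put((0, 0), 1.0)   # re-inserting an item updates its priority
#   print(pq.get())       # (1.0, (0, 0))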
def consistent_heuristic(P: TPos, goal: TPos):
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(P: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(P, goal) // t


def heuristic_1(P: TPos, goal: TPos):
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(neighbours, 0, goal, g_function):
                                open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                        )
                        close_list_anchor.append(get_s)
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
        for j in range(n):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 202
|
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of prime products encoding the ways to write the number as a sum of primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
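
# Worked example: partition(7) == {7, 10, 12}. Each product encodes one way of
# writing 7 as a sum of primes (7; 5 + 2; 3 + 2 + 2), so len(partition(7)) == 3.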
def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest number expressible as a sum of primes in more than `number_unique_partitions` ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"{solution() = }")
| 202
| 1
|
def base16_encode(data: bytes) -> str:
    """Encode `data` as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes."""
    # Check that the data has an even number of hex digits
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
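
# Round-trip example:
#
#   base16_encode(b"Hello")      # '48656C6C6F'
#   base16_decode("48656C6C6F")  # b'Hello'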
| 354
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Read a boolean flag from the environment, falling back to `default`."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=1 is set."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    """Decorator marking a test that must run on the CPU only."""
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    """Decorator skipping a test unless the installed torch is at least `version`."""
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    """Decorator skipping a test unless at least one tracker (and not comet_ml) is installed."""
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its
    data at the start of a test, and then destroys it at the end of the TestCase.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`."
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Remove `cls.tmpdir` after the test suite has finished."
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroy all contents in `self.tmpdir`, but not `self.tmpdir` itself."
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """A TestCase class that resets the accelerator state at the end of every test."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """A TestCase class designed to dynamically add mocks that should be used in every test."""

    def add_mocks(self, mocks):
        """Add custom mocks for tests; call during `setUp`, after `super().setUp()`."""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Check that `tensor` holds the same value on every process."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    """
    Run `command` with `subprocess.check_output` and optionally return its stdout. Raise a SubprocessCallException
    with the captured stderr on failure.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 279
| 0
|
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
| 75
|
'''simple docstring'''
from __future__ import annotations
def a_ ( __snake_case : str , __snake_case : list[str] | None = None , __snake_case : dict[str, float] | None = None , __snake_case : bool = False , ) -> tuple[int, float, str]:
"""simple docstring"""
    alphabet_letters =cipher_alphabet or [chr(i ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies ={
'''a''': 0.0_8_4_9_7,
'''b''': 0.0_1_4_9_2,
'''c''': 0.0_2_2_0_2,
'''d''': 0.0_4_2_5_3,
'''e''': 0.1_1_1_6_2,
'''f''': 0.0_2_2_2_8,
'''g''': 0.0_2_0_1_5,
'''h''': 0.0_6_0_9_4,
'''i''': 0.0_7_5_4_6,
'''j''': 0.0_0_1_5_3,
'''k''': 0.0_1_2_9_2,
'''l''': 0.0_4_0_2_5,
'''m''': 0.0_2_4_0_6,
'''n''': 0.0_6_7_4_9,
'''o''': 0.0_7_5_0_7,
'''p''': 0.0_1_9_2_9,
'''q''': 0.0_0_0_9_5,
'''r''': 0.0_7_5_8_7,
'''s''': 0.0_6_3_2_7,
'''t''': 0.0_9_3_5_6,
'''u''': 0.0_2_7_5_8,
'''v''': 0.0_0_9_7_8,
'''w''': 0.0_2_5_6_0,
'''x''': 0.0_0_1_5_0,
'''y''': 0.0_1_9_9_4,
'''z''': 0.0_0_0_7_7,
}
else:
# Custom frequencies dictionary
        frequencies =frequencies_dict
if not case_sensitive:
        ciphertext =ciphertext.lower()
# Chi squared statistic values
    chi_squared_statistic_values ={}
# cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift =''''''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key =(alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic =0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter =letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences =decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected =frequencies[letter] * occurrences
# Complete the chi squared statistic formula
                    chi_letter_value =((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences =decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected =frequencies[letter] * occurrences
# Complete the chi squared statistic formula
                    chi_letter_value =((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] =(
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key : int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher =min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
# Get all the data from the most likely cipher (key, decoded message)
    (
        (
            most_likely_cipher_chi_squared_value
        ), (
            decoded_most_likely_cipher
        ),
    ) =chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
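

# Illustrative usage of the decryption helper above (a minimal sketch; the ciphertext
# is an assumption chosen for demonstration -- "this is a secret message" shifted by 13,
# which the chi-squared fit should recover):
if __name__ == "__main__":
    most_likely_shift, chi_squared_value, decoded_text = a_("guvf vf n frperg zrffntr")
    print(f"shift={most_likely_shift}, chi^2={chi_squared_value:.2f}: {decoded_text}")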
| 75
| 1
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class __lowerCAmelCase ( TaskTemplate ):
    """simple docstring"""

    task : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema : ClassVar[Features] = Features({'audio': Audio()} )
    label_schema : ClassVar[Features] = Features({'labels': ClassLabel} )
    audio_column : str = "audio"
    label_column : str = "labels"

    def align_with_features( self , features ):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template
@property
    def column_mapping( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 365
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : List[Any] = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __lowerCAmelCase ( PretrainedConfig ):
    """simple docstring"""

    model_type = 'biogpt'

    def __init__( self , vocab_size=42384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
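

# Illustrative usage (a minimal sketch; the masked class name is kept exactly as
# defined above, and the attribute access mirrors the constructor's assignments):
#   config = __lowerCAmelCase(hidden_size=512, num_hidden_layers=6)
#   config.hidden_size  # -> 512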
| 287
| 0
|
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__A = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder ( nn.Module ):
    def __init__( self , args ):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )

    def forward( self , x ):
        out = self.pool(self.model(x ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048


class JsonlDataset ( Dataset ):
    def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__( self ):
        return len(self.data )

    def __getitem__( self , index ):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=True ) )
        start_token , sentence , end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
        image = self.transforms(image )
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies( self ):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"] )
        return label_freqs


def collate_fn ( batch ):
    '''simple docstring'''
    lens = [len(row["sentence"] ) for row in batch]
    bsz , max_seq_len = len(batch ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch] )
    tgt_tensor = torch.stack([row["label"] for row in batch] )
    img_start_token = torch.stack([row["image_start_token"] for row in batch] )
    img_end_token = torch.stack([row["image_end_token"] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels ( ):
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms ( ):
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
] )
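

# Putting the pieces together (an illustrative sketch; the file path, tokenizer and
# batch size below are assumptions, not taken from the original source):
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=512)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)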
| 135
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def squared_euclidean_distance ( a , b ):
    '''simple docstring'''
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d


def color_quantize ( x , clusters ):
    '''simple docstring'''
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
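

# Note: `squared_euclidean_distance` relies on the identity
#   ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b
# so every pixel-to-cluster distance comes out of a single matmul: an (n, 3) pixel
# array against a (k, 3) cluster array yields an (n, k) matrix, and `color_quantize`
# takes the argmin per row. Quick sanity check (assumed values, not from the source):
#   squared_euclidean_distance(np.zeros((1, 3)), np.array([[1.0, 2.0, 2.0]]))  # -> [[9.0]]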
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = ["""pixel_values"""]
def __init__( self : List[str] , UpperCamelCase : Optional[Union[List[List[int]], np.ndarray]] = None , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase : bool = True , UpperCamelCase : bool = True , **UpperCamelCase : int , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
__UpperCAmelCase : Optional[Any] = size if size is not None else {"""height""": 256, """width""": 256}
__UpperCAmelCase : Tuple = get_size_dict(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = np.array(UpperCamelCase ) if clusters is not None else None
__UpperCAmelCase : int = do_resize
__UpperCAmelCase : List[str] = size
__UpperCAmelCase : Dict = resample
__UpperCAmelCase : int = do_normalize
__UpperCAmelCase : List[Any] = do_color_quantize
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : str , ):
'''simple docstring'''
__UpperCAmelCase : Any = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
UpperCamelCase , size=(size["""height"""], size["""width"""]) , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : Dict , UpperCamelCase : np.ndarray , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = rescale(image=UpperCamelCase , scale=1 / 127.5 , data_format=UpperCamelCase )
__UpperCAmelCase : Optional[Any] = image - 1
return image
def lowerCamelCase__ ( self : Dict , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[Union[List[List[int]], np.ndarray]] = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **UpperCamelCase : Optional[int] , ):
'''simple docstring'''
__UpperCAmelCase : List[str] = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase : Any = size if size is not None else self.size
__UpperCAmelCase : Any = get_size_dict(UpperCamelCase )
__UpperCAmelCase : Optional[Any] = resample if resample is not None else self.resample
__UpperCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : Any = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__UpperCAmelCase : Tuple = clusters if clusters is not None else self.clusters
__UpperCAmelCase : Tuple = np.array(UpperCamelCase )
__UpperCAmelCase : int = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
__UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
__UpperCAmelCase : Dict = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images]
if do_normalize:
__UpperCAmelCase : Any = [self.normalize(image=UpperCamelCase ) for image in images]
if do_color_quantize:
__UpperCAmelCase : str = [to_channel_dimension_format(UpperCamelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__UpperCAmelCase : Optional[int] = np.array(UpperCamelCase )
__UpperCAmelCase : Any = color_quantize(UpperCamelCase , UpperCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__UpperCAmelCase : Dict = images.shape[0]
__UpperCAmelCase : Optional[Any] = images.reshape(UpperCamelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__UpperCAmelCase : int = list(UpperCamelCase )
else:
__UpperCAmelCase : List[Any] = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images]
__UpperCAmelCase : List[str] = {"""input_ids""": images}
return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
| 115
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class _lowerCamelCase( PretrainedConfig ):
    model_type = """poolformer"""

    def __init__( self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0, depths=[2, 2, 6, 2], hidden_sizes=[64, 1_28, 3_20, 5_12], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0, hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1E-5, initializer_range=0.0_2, **kwargs, ):
        """simple docstring"""
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs( self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation( self) -> float:
        """simple docstring"""
        return 2E-3
| 84
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 84
| 1
|
def manhattan_distance ( point_a , point_b ):
    '''simple docstring'''
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('''Both points must be in the same n-dimensional space''' )
    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )


def _validate_point ( point ):
    '''simple docstring'''
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg = (
                        '''Expected a list of numbers as input, found '''
                        f"""{type(item ).__name__}"""
                    )
                    raise TypeError(msg )
        else:
            msg = f"""Expected a list of numbers as input, found {type(point ).__name__}"""
            raise TypeError(msg )
    else:
        raise ValueError('''Missing an input''' )


def manhattan_distance_one_liner ( point_a , point_b ):
    '''simple docstring'''
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('''Both points must be in the same n-dimensional space''' )
    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
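

# Worked example (illustrative coordinates, not from the original source): for
# p = (1, 1) and q = (3, 4) both variants return |1 - 3| + |1 - 4| = 5.0:
#   manhattan_distance([1, 1], [3, 4]) == manhattan_distance_one_liner([1, 1], [3, 4]) == 5.0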
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Optional[int] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _UpperCAmelCase ( PretrainedConfig ):
    model_type = '''vivit'''

    def __init__( self , image_size=2_24 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , qkv_bias=True , **kwargs , ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
| 292
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 5_1_2,
'''google/electra-base-generator''': 5_1_2,
'''google/electra-large-generator''': 5_1_2,
'''google/electra-small-discriminator''': 5_1_2,
'''google/electra-base-discriminator''': 5_1_2,
'''google/electra-large-discriminator''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class lowerCamelCase_( PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix=None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
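

# Illustrative usage (a minimal sketch; the checkpoint name comes from the pretrained
# maps above, and loading it requires network access):
#   tokenizer = lowerCamelCase_.from_pretrained("google/electra-small-discriminator")
#   tokenizer("Hello world")["input_ids"]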
| 363
|
"""simple docstring"""
import argparse
from collections import defaultdict
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : str , lowercase_ : str ) -> Optional[int]:
_lowerCamelCase = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(lowercase_ , '''r''' ) as f:
_lowerCamelCase = f.readlines()
_lowerCamelCase = F"""class {class_name}("""
_lowerCamelCase = F"""{4 * " "}def {test_name}("""
_lowerCamelCase = F"""{8 * " "}{correct_line.split()[0]}"""
_lowerCamelCase = F"""{16 * " "}{correct_line.split()[0]}"""
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = []
for line in lines:
if line.startswith(lowercase_ ):
_lowerCamelCase = True
elif in_class and line.startswith(lowercase_ ):
_lowerCamelCase = True
elif in_class and in_func and (line.startswith(lowercase_ ) or line.startswith(lowercase_ )):
_lowerCamelCase = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowerCamelCase = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowerCamelCase = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowerCamelCase = _lowerCamelCase = _lowerCamelCase = _lowerCamelCase = False
else:
new_lines.append(lowercase_ )
with open(lowercase_ , '''w''' ) as f:
for line in new_lines:
f.write(lowercase_ )
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Union[str, Any]=None ) -> Any:
if fail is not None:
with open(lowercase_ , '''r''' ) as f:
_lowerCamelCase = {l.strip() for l in f.readlines()}
else:
_lowerCamelCase = None
with open(lowercase_ , '''r''' ) as f:
_lowerCamelCase = f.readlines()
_lowerCamelCase = defaultdict(lowercase_ )
for line in correct_lines:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = line.split(''';''' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 73
| 0
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs ( model ):
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 1_6, 3_2] , window_size=original_config.window_size , embed_dim=1_2_8 , )
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
            model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
    return encoder_config, decoder_config
def rename_key ( name ):
    if "encoder.model" in name:
        name = name.replace("encoder.model" , "encoder" )
    if "decoder.model" in name:
        name = name.replace("decoder.model" , "decoder" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if name.startswith("encoder" ):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj" , "attention.output.dense" )
        if "attn" in name and "mask" not in name:
            name = name.replace("attn" , "attention.self" )
        if "norm1" in name:
            name = name.replace("norm1" , "layernorm_before" )
        if "norm2" in name:
            name = name.replace("norm2" , "layernorm_after" )
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1" , "intermediate.dense" )
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2" , "output.dense" )
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
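

# e.g. tracing the rules above on an illustrative checkpoint key (not from a real file):
#   "encoder.model.layers.0.blocks.0.attn.proj.weight"
#   -> "encoder.encoder.layers.0.blocks.0.attention.output.dense.weight"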
def convert_state_dict ( orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[3] )
            block_num = int(key_split[5] )
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
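

# Note on the "qkv" branch above: the original Swin encoder stores query/key/value as
# one fused projection, so a weight of shape (3 * dim, dim) is split row-wise into
# three (dim, dim) matrices (and a (3 * dim,) bias into three (dim,) vectors) to match
# the separate q/k/v parameters of the HuggingFace layers.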
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=False ):
# load original model
lowerCamelCase_ = DonutModel.from_pretrained(lowerCamelCase__ ).eval()
# load HuggingFace model
lowerCamelCase_ , lowerCamelCase_ = get_configs(lowerCamelCase__ )
lowerCamelCase_ = DonutSwinModel(lowerCamelCase__ )
lowerCamelCase_ = MBartForCausalLM(lowerCamelCase__ )
lowerCamelCase_ = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
model.eval()
lowerCamelCase_ = original_model.state_dict()
lowerCamelCase_ = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# verify results on scanned document
lowerCamelCase_ = load_dataset("hf-internal-testing/example-documents" )
lowerCamelCase_ = dataset["test"][0]["image"].convert("RGB" )
lowerCamelCase_ = XLMRobertaTokenizerFast.from_pretrained(lowerCamelCase__ , from_slow=lowerCamelCase__ )
lowerCamelCase_ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowerCamelCase_ = DonutProcessor(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = processor(lowerCamelCase__ , return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowerCamelCase_ = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
lowerCamelCase_ = "When is the coffee break?"
lowerCamelCase_ = task_prompt.replace("{user_input}" , lowerCamelCase__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowerCamelCase_ = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowerCamelCase_ = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        lowerCamelCase_ = "<s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowerCamelCase_ = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowerCamelCase_ = "hello world"
else:
raise ValueError("Model name not supported" )
lowerCamelCase_ = original_model.decoder.tokenizer(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors="pt" )[
"input_ids"
]
lowerCamelCase_ = original_model.encoder.model.patch_embed(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = model.encoder.embeddings(lowerCamelCase__ )
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
# verify encoder hidden states
lowerCamelCase_ = original_model.encoder(lowerCamelCase__ )
lowerCamelCase_ = model.encoder(lowerCamelCase__ ).last_hidden_state
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-2 )
# verify decoder hidden states
lowerCamelCase_ = original_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).logits
lowerCamelCase_ = model(lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ).logits
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
__A =parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 19
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class __lowerCAmelCase :
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
return None
class __lowerCAmelCase :
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
return None
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : Dict = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__magic_name__ , '''tf''' , 12 , **__magic_name__ )
@require_torch
@slow
def lowerCamelCase (self ) -> int:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__magic_name__ , '''pt''' , 12 , **__magic_name__ )
@require_torch
@slow
def lowerCamelCase (self ) -> int:
'''simple docstring'''
from transformers import BertModel
snake_case_ : str = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(__magic_name__ ) )
vocab_file.flush()
snake_case_ : Optional[Any] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
snake_case_ : str = BertModel(BertConfig(vocab_size=len(__magic_name__ ) ) )
model.save_pretrained(__magic_name__ )
self._test_export(__magic_name__ , '''pt''' , 12 , __magic_name__ )
@require_tf
@slow
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
snake_case_ : Tuple = self._test_export(__magic_name__ , '''tf''' , 12 , **__magic_name__ )
snake_case_ : List[str] = quantize(Path(__magic_name__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__magic_name__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
snake_case_ : Any = self._test_export(__magic_name__ , '''pt''' , 12 , **__magic_name__ )
snake_case_ : Any = quantize(__magic_name__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__magic_name__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Tuple:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
snake_case_ : List[str] = Path(__magic_name__ ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
return path
except Exception as e:
self.fail(__magic_name__ )
@require_torch
@require_tokenizers
@slow
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
from transformers import BertModel
snake_case_ : Optional[Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
snake_case_ : int = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(__magic_name__ , __magic_name__ , '''pt''' )
@require_tf
@require_tokenizers
@slow
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
from transformers import TFBertModel
snake_case_ : Any = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
snake_case_ : str = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(__magic_name__ , __magic_name__ , '''tf''' )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : Tuple = FeatureExtractionPipeline(__magic_name__ , __magic_name__ )
snake_case_ : Optional[int] = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = infer_shapes(__magic_name__ , __magic_name__ )
# Assert all variables are present
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __magic_name__ )
self.assertSequenceEqual(variable_names[3:] , __magic_name__ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
snake_case_ : List[str] = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
snake_case_ , snake_case_ : Tuple = ensure_valid_input(FuncContiguousArgs() , __magic_name__ , __magic_name__ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__magic_name__ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__magic_name__ ) , set(__magic_name__ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__magic_name__ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
snake_case_ , snake_case_ : Dict = ensure_valid_input(FuncNonContiguousArgs() , __magic_name__ , __magic_name__ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(len(__magic_name__ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
| 279
| 0
|
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main ( ):
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        begin_error_msg = ''' '''.join(str(e ).split(''' ''' )[:-1] )
        full_error_msg = ''''''
        depreciated_args = eval(str(e ).split(''' ''' )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
| 11
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess ( image ):
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 2_55.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
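

# Illustrative trace of the helper above (the input size is an assumption): a
# 137 x 260 PIL image is rounded down so each side is a multiple of 32, scaled
# to [0, 1], laid out as a (1, 3, H, W) tensor and finally mapped to [-1, 1].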
class __lowerCAmelCase ( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , vqvae , unet , scheduler , ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image=None , batch_size=1 , num_inference_steps=100 , eta=0.0 , generator=None , output_type="pil" , return_dict=True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}''' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 11
| 1
|
from math import factorial
def binomial_distribution ( successes , trials , prob ) -> float:
    '''simple docstring'''
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""" )
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""" )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError("""the function is defined for non-negative integers""" )
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in range of 1 - 0""" )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
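

# The value returned above is the binomial probability mass function
#   P(X = k) = C(n, k) * p**k * (1 - p)**(n - k)
# e.g. for the demo below: C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375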
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75))
| 296
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
        model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        input_ids = tf.convert_to_tensor(
            [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
        output = model(input_ids )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 1_0, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 287
| 0
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=18 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , ) -> Tuple:
A_ : Tuple = size if size is not None else {"""height""": 18, """width""": 18}
A_ : Optional[int] = parent
A_ : List[Any] = batch_size
A_ : Optional[int] = num_channels
A_ : Union[str, Any] = image_size
A_ : Union[str, Any] = min_resolution
A_ : str = max_resolution
A_ : Optional[Any] = do_resize
A_ : Optional[Any] = size
A_ : List[str] = do_normalize
def UpperCAmelCase_ ( self ) -> Tuple:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = ImageGPTImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : Tuple = ImageGPTImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Any:
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """clusters""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
A_ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : str = self.image_processing_class(**self.image_processor_dict )
A_ : Tuple = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(_lowerCamelCase , obj[key] ) )
else:
self.assertEqual(obj[key] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : List[str] = os.path.join(_lowerCamelCase , """image_processor.json""" )
image_processor_first.to_json_file(_lowerCamelCase )
A_ : Tuple = self.image_processing_class.from_json_file(_lowerCamelCase ).to_dict()
A_ : Tuple = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_lowerCamelCase , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> int:
A_ : str = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(_lowerCamelCase )
A_ : Tuple = self.image_processing_class.from_pretrained(_lowerCamelCase ).to_dict()
A_ : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_lowerCamelCase , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _lowerCamelCase )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def UpperCAmelCase_ ( self ) -> Tuple:
pass
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
A_ : int = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
A_ : Union[str, Any] = Image.open(dataset[4]["""file"""] )
A_ : Optional[int] = Image.open(dataset[5]["""file"""] )
A_ : Union[str, Any] = [imagea, imagea]
return images
@require_vision
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : int = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
A_ : Optional[int] = prepare_images()
# test non-batched
A_ : Dict = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
A_ : List[Any] = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , _lowerCamelCase )
# test batched
A_ : List[str] = image_processing(_lowerCamelCase , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
A_ : Dict = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , _lowerCamelCase )
| 164
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase__ : Any = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Union[str, Any] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
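# Added illustration: the _LazyModule wiring above defers heavy imports until an
# attribute is first accessed. A minimal standalone sketch of that pattern
# (assumption: simplified, without the optional-dependency checks shown above).
import importlib

class LazyNamespace:
    def __init__(self, name_to_module: dict):
        self._name_to_module = name_to_module

    def __getattr__(self, name: str):
        # Import the owning module only when the attribute is first requested.
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)

lazy = LazyNamespace({"OrderedDict": "collections"})
print(lazy.OrderedDict)  # <class 'collections.OrderedDict'>, imported on demand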
| 164
| 1
|
"""simple docstring"""
def _snake_case ( lowercase__ : int = 1_0_0_0 ) -> int:
    '''simple docstring'''
    fib_prev, fib_curr = 1, 1
    index = 2
    while True:
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
        index += 1
        if len(str(fib_curr)) == lowercase__:
            break
    return index
if __name__ == "__main__":
    print(_snake_case(int(str(input()).strip())))
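    # Added cross-check: the loop above finds the index of the first Fibonacci number
    # with the requested digit count. A closed-form estimate via Binet's approximation
    # should agree (assumption: F(n) ~ phi**n / sqrt(5), accurate enough for digit counts).
    import math

    def first_fib_index_with_digits(digits: int) -> int:
        phi = (1 + math.sqrt(5)) / 2
        return math.ceil((digits - 1 + math.log10(math.sqrt(5))) / math.log10(phi))

    assert first_fib_index_with_digits(3) == 12  # F(12) = 144 is the first 3-digit term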
| 84
|
"""simple docstring"""
def permute ( lowercase__ : list[int] ) -> list[list[int]]:
    '''simple docstring'''
    result = []
    if len(lowercase__ ) == 1:
        return [lowercase__.copy()]
    for _ in range(len(lowercase__ ) ):
        n = lowercase__.pop(0 )
        permutations = permute(lowercase__ )
        for perm in permutations:
            perm.append(n )
        result.extend(permutations )
        lowercase__.append(n )
    return result
def permutea ( lowercase__ : list[int] ) -> list[list[int]]:
    '''simple docstring'''
    def backtrack(start : int ):
        if start == len(lowercase__ ) - 1:
            output.append(lowercase__[:] )
        else:
            for i in range(start , len(lowercase__ ) ):
                lowercase__[i], lowercase__[start] = lowercase__[start], lowercase__[i]
                backtrack(start + 1 )
                lowercase__[i], lowercase__[start] = lowercase__[start], lowercase__[i]  # backtrack
    output = []
    backtrack(0 )
    return output
if __name__ == "__main__":
import doctest
    # use res to print the data from the permutea function
    res = permutea([1, 2, 3])
    print(res)
doctest.testmod()
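    # Added sanity sketch: both implementations above should agree with the standard
    # library (assumption: the ordering of the returned permutations differs, hence sorting).
    from itertools import permutations as it_permutations

    sample = [1, 2, 3]
    assert sorted(map(tuple, permute(sample[:]))) == sorted(it_permutations(sample))
    assert sorted(map(tuple, permutea(sample[:]))) == sorted(it_permutations(sample))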
| 84
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_UpperCamelCase: Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = ['pixel_values']
def __init__( self : List[Any], lowerCAmelCase : bool = True, lowerCAmelCase : Dict[str, int] = None, lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC, lowerCAmelCase : bool = True, lowerCAmelCase : Dict[str, int] = None, lowerCAmelCase : bool = True, lowerCAmelCase : Union[int, float] = 1 / 255, lowerCAmelCase : bool = True, lowerCAmelCase : Optional[Union[float, List[float]]] = None, lowerCAmelCase : Optional[Union[float, List[float]]] = None, lowerCAmelCase : bool = True, **lowerCAmelCase : Dict, ) -> None:
super().__init__(**lowerCAmelCase )
lowercase : Union[str, Any] = size if size is not None else {'shortest_edge': 224}
lowercase : Optional[int] = get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
lowercase : Optional[Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowercase : List[str] = get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase, param_name='crop_size' )
lowercase : Union[str, Any] = do_resize
lowercase : Tuple = size
lowercase : Dict = resample
lowercase : List[Any] = do_center_crop
lowercase : Tuple = crop_size
lowercase : Optional[int] = do_rescale
lowercase : int = rescale_factor
lowercase : Dict = do_normalize
lowercase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase : Any = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase : List[str] = do_convert_rgb
def lowercase ( self : str, lowerCAmelCase : np.ndarray, lowerCAmelCase : Dict[str, int], lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC, lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCAmelCase : Dict, ) -> np.ndarray:
lowercase : str = get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase : List[str] = get_resize_output_image_size(lowerCAmelCase, size=size['shortest_edge'], default_to_square=lowerCAmelCase )
return resize(lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase ( self : str, lowerCAmelCase : np.ndarray, lowerCAmelCase : Dict[str, int], lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCAmelCase : Optional[Any], ) -> np.ndarray:
lowercase : Dict = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCAmelCase, size=(size['height'], size['width']), data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase ( self : List[str], lowerCAmelCase : np.ndarray, lowerCAmelCase : Union[int, float], lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCAmelCase : Optional[int], ) -> Union[str, Any]:
return rescale(lowerCAmelCase, scale=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase ( self : Union[str, Any], lowerCAmelCase : np.ndarray, lowerCAmelCase : Union[float, List[float]], lowerCAmelCase : Union[float, List[float]], lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCAmelCase : Any, ) -> np.ndarray:
return normalize(lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase ( self : Optional[Any], lowerCAmelCase : ImageInput, lowerCAmelCase : bool = None, lowerCAmelCase : Dict[str, int] = None, lowerCAmelCase : PILImageResampling = None, lowerCAmelCase : bool = None, lowerCAmelCase : int = None, lowerCAmelCase : bool = None, lowerCAmelCase : float = None, lowerCAmelCase : bool = None, lowerCAmelCase : Optional[Union[float, List[float]]] = None, lowerCAmelCase : Optional[Union[float, List[float]]] = None, lowerCAmelCase : bool = None, lowerCAmelCase : Optional[Union[str, TensorType]] = None, lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST, **lowerCAmelCase : Dict, ) -> PIL.Image.Image:
lowercase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowercase : Optional[Any] = size if size is not None else self.size
lowercase : List[str] = get_size_dict(lowerCAmelCase, param_name='size', default_to_square=lowerCAmelCase )
lowercase : Dict = resample if resample is not None else self.resample
lowercase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : Dict = crop_size if crop_size is not None else self.crop_size
lowercase : str = get_size_dict(lowerCAmelCase, param_name='crop_size', default_to_square=lowerCAmelCase )
lowercase : Dict = do_rescale if do_rescale is not None else self.do_rescale
lowercase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Any = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Tuple = image_mean if image_mean is not None else self.image_mean
lowercase : Optional[int] = image_std if image_std is not None else self.image_std
lowercase : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase : List[Any] = make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase : Tuple = [convert_to_rgb(lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowercase : Dict = [to_numpy_array(lowerCAmelCase ) for image in images]
if do_resize:
lowercase : Dict = [self.resize(image=lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase ) for image in images]
if do_center_crop:
lowercase : List[Any] = [self.center_crop(image=lowerCAmelCase, size=lowerCAmelCase ) for image in images]
if do_rescale:
lowercase : Optional[int] = [self.rescale(image=lowerCAmelCase, scale=lowerCAmelCase ) for image in images]
if do_normalize:
lowercase : int = [self.normalize(image=lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase ) for image in images]
lowercase : List[Any] = [to_channel_dimension_format(lowerCAmelCase, lowerCAmelCase ) for image in images]
lowercase : Optional[Any] = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase, tensor_type=lowerCAmelCase )
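# Added usage sketch via the equivalent public class (assumption: the listing above
# mirrors transformers.CLIPImageProcessor and its documented defaults).
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

demo_image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
pixel_values = CLIPImageProcessor()(demo_image, return_tensors="np")["pixel_values"]
print(pixel_values.shape)  # (1, 3, 224, 224) with the resize/center-crop defaults above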
| 354
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_UpperCamelCase: Optional[int] = logging.get_logger(__name__)
_UpperCamelCase: Union[str, Any] = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = 'gpt_neo'
_lowerCamelCase = ['past_key_values']
_lowerCamelCase = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : Optional[Any], lowerCAmelCase : int=50257, lowerCAmelCase : Tuple=2048, lowerCAmelCase : int=2048, lowerCAmelCase : Tuple=24, lowerCAmelCase : Optional[Any]=[[["global", "local"], 12]], lowerCAmelCase : Optional[int]=16, lowerCAmelCase : Optional[Any]=None, lowerCAmelCase : Dict=256, lowerCAmelCase : Optional[int]="gelu_new", lowerCAmelCase : Any=0.0, lowerCAmelCase : Dict=0.0, lowerCAmelCase : Optional[Any]=0.0, lowerCAmelCase : Dict=0.1, lowerCAmelCase : List[Any]=1e-5, lowerCAmelCase : Optional[Any]=0.02, lowerCAmelCase : Dict=True, lowerCAmelCase : int=50256, lowerCAmelCase : Optional[Any]=50256, **lowerCAmelCase : Any, ) -> Optional[Any]:
lowercase : List[Any] = vocab_size
lowercase : Optional[Any] = max_position_embeddings
lowercase : Dict = hidden_size
lowercase : Optional[Any] = num_layers
lowercase : str = num_heads
lowercase : Optional[int] = intermediate_size
lowercase : List[str] = window_size
lowercase : Dict = activation_function
lowercase : Dict = resid_dropout
lowercase : int = embed_dropout
lowercase : Optional[Any] = attention_dropout
lowercase : Tuple = classifier_dropout
lowercase : Optional[int] = layer_norm_epsilon
lowercase : Dict = initializer_range
lowercase : Optional[Any] = use_cache
lowercase : Union[str, Any] = bos_token_id
lowercase : int = eos_token_id
lowercase : str = attention_types
lowercase : int = self.expand_attention_types_params(lowerCAmelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
f'''`config.num_layers = {self.num_layers}`. '''
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase )
@staticmethod
def lowercase ( lowerCAmelCase : str ) -> Optional[Any]:
lowercase : Dict = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
'''simple docstring'''
import torch
lowercase : Dict = input.size()
lowercase : Optional[int] = len(_UpperCAmelCase )
lowercase : str = shape[dimension]
lowercase : Optional[Any] = torch.arange(0 , _UpperCAmelCase , _UpperCAmelCase )
lowercase : List[str] = torch.div(sizedim - size , _UpperCAmelCase , rounding_mode='floor' ) + 1
lowercase : Any = torch.arange(_UpperCAmelCase ) + low_indices[:min_length][:, None]
lowercase : List[Any] = [slice(_UpperCAmelCase )] * rank
lowercase : int = indices
lowercase : Optional[Any] = input[s]
lowercase : str = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(_UpperCAmelCase )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Any:
'''simple docstring'''
import torch
lowercase : int = torch.arange(1 , _UpperCAmelCase )
lowercase : List[str] = torch.remainder(_UpperCAmelCase , _UpperCAmelCase )
lowercase : Optional[int] = remainders == 0
lowercase : Tuple = candidates[divisor_indices]
lowercase : Any = torch.max(_UpperCAmelCase )
return largest_divisor, torch.div(_UpperCAmelCase , _UpperCAmelCase , rounding_mode='floor' )
class a__ ( SCREAMING_SNAKE_CASE__ ):
@property
def lowercase ( self : int ) -> Mapping[str, Mapping[int, str]]:
lowercase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase, direction='inputs' )
lowercase : Dict = {0: 'batch', 1: 'past_sequence + sequence'}
else:
lowercase : List[str] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowercase ( self : int ) -> int:
return self._config.num_heads
def lowercase ( self : Tuple, lowerCAmelCase : PreTrainedTokenizer, lowerCAmelCase : int = -1, lowerCAmelCase : int = -1, lowerCAmelCase : bool = False, lowerCAmelCase : Optional[TensorType] = None, ) -> Mapping[str, Any]:
lowercase : Union[str, Any] = super(lowerCAmelCase, self ).generate_dummy_inputs(
lowerCAmelCase, batch_size=lowerCAmelCase, seq_length=lowerCAmelCase, is_pair=lowerCAmelCase, framework=lowerCAmelCase )
# We need to order the input in the way they appears in the forward()
lowercase : int = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowercase , lowercase : str = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowercase : Tuple = seqlen + 2
lowercase : Tuple = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowercase : Any = [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(self.num_layers )
]
lowercase : Optional[int] = common_inputs['attention_mask']
if self.use_past:
lowercase : Optional[int] = ordered_inputs['attention_mask'].dtype
lowercase : Dict = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase, lowerCAmelCase, dtype=lowerCAmelCase )], dim=1 )
return ordered_inputs
@property
def lowercase ( self : int ) -> int:
return 13
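# Added reference sketch: the sliding-window helper above mirrors what
# torch.Tensor.unfold does along a single dimension (assumption: equivalent
# semantics for both overlapping and non-overlapping windows).
import torch

x = torch.arange(8)
windows = x.unfold(0, 4, 2)  # windows of size 4, stride 2, along dim 0
print(windows)
# tensor([[0, 1, 2, 3],
#         [2, 3, 4, 5],
#         [4, 5, 6, 7]])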
| 53
| 0
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCamelCase = pytest.mark.integration
@pytest.mark.parametrize("""path""" ,["""paws""", """csv"""] )
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
inspect_dataset(lowerCamelCase__ ,lowerCamelCase__ )
_SCREAMING_SNAKE_CASE = path + '.py'
assert script_name in os.listdir(lowerCamelCase__ )
assert "__pycache__" not in os.listdir(lowerCamelCase__ )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" ,["""accuracy"""] )
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
inspect_metric(lowerCamelCase__ ,lowerCamelCase__ )
_SCREAMING_SNAKE_CASE = path + '.py'
assert script_name in os.listdir(lowerCamelCase__ )
assert "__pycache__" not in os.listdir(lowerCamelCase__ )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" ,[
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = get_dataset_config_info(lowerCamelCase__ ,config_name=lowerCamelCase__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" ,[
("""paws""", None, ValueError),
] ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(lowerCamelCase__ ):
get_dataset_config_info(lowerCamelCase__ ,config_name=lowerCamelCase__ )
@pytest.mark.parametrize(
"""path, expected""" ,[
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = get_dataset_config_names(lowerCamelCase__ )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" ,[
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = get_dataset_infos(lowerCamelCase__ )
assert list(infos.keys() ) == expected_configs
_SCREAMING_SNAKE_CASE = expected_configs[0]
assert expected_config in infos
_SCREAMING_SNAKE_CASE = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" ,[
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = get_dataset_infos(lowerCamelCase__ )
assert expected_config in infos
_SCREAMING_SNAKE_CASE = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" ,[
("""paws""", None, ValueError),
] ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[Any]:
"""simple docstring"""
with pytest.raises(lowerCamelCase__ ):
get_dataset_split_names(lowerCamelCase__ ,config_name=lowerCamelCase__ )
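# Added usage sketch of the API under test (assumption: network access to the Hub
# and availability of the public "squad" dataset, as in the parametrized cases above).
from datasets import get_dataset_split_names

print(get_dataset_split_names("squad", config_name="plain_text"))  # ['train', 'validation']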
| 306
|
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class A_ :
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Node | None):
__lowerCamelCase : Tuple = pos_x
__lowerCamelCase : List[str] = pos_y
__lowerCamelCase : str = (pos_y, pos_x)
__lowerCamelCase : str = goal_x
__lowerCamelCase : int = goal_y
__lowerCamelCase : List[Any] = parent
class A_ :
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : tuple[int, int] ,SCREAMING_SNAKE_CASE__ : tuple[int, int]):
__lowerCamelCase : Any = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = [self.start]
__lowerCamelCase : List[str] = False
def lowerCAmelCase ( self : List[Any]):
while self.node_queue:
__lowerCamelCase : Any = self.node_queue.pop(0)
if current_node.pos == self.target.pos:
__lowerCamelCase : Dict = True
return self.retrace_path(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = self.get_successors(SCREAMING_SNAKE_CASE__)
for node in successors:
self.node_queue.append(SCREAMING_SNAKE_CASE__)
if not self.reached:
return [self.start.pos]
return None
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Node):
__lowerCamelCase : Union[str, Any] = []
for action in delta:
__lowerCamelCase : Optional[Any] = parent.pos_x + action[1]
__lowerCamelCase : Optional[int] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE__) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.target.pos_y ,self.target.pos_x ,SCREAMING_SNAKE_CASE__))
return successors
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Node | None):
__lowerCamelCase : List[Any] = node
__lowerCamelCase : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
__lowerCamelCase : int = current_node.parent
path.reverse()
return path
class A_ :
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : int = BreadthFirstSearch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = BreadthFirstSearch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = False
def lowerCAmelCase ( self : str):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
__lowerCamelCase : Any = self.fwd_bfs.node_queue.pop(0)
__lowerCamelCase : Any = self.bwd_bfs.node_queue.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
__lowerCamelCase : List[str] = True
return self.retrace_bidirectional_path(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = current_bwd_node
__lowerCamelCase : int = current_fwd_node
__lowerCamelCase : str = {
self.fwd_bfs: self.fwd_bfs.get_successors(SCREAMING_SNAKE_CASE__),
self.bwd_bfs: self.bwd_bfs.get_successors(SCREAMING_SNAKE_CASE__),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(SCREAMING_SNAKE_CASE__)
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Node ,SCREAMING_SNAKE_CASE__ : Node):
__lowerCamelCase : List[Any] = self.fwd_bfs.retrace_path(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = self.bwd_bfs.retrace_path(SCREAMING_SNAKE_CASE__)
bwd_path.pop()
bwd_path.reverse()
__lowerCamelCase : List[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("""Unidirectional BFS computation time : """, bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 73
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
UpperCAmelCase__ = random.Random()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple=1.0 , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : int=None ) -> Optional[int]:
'''simple docstring'''
if rng is None:
_UpperCAmelCase = global_rng
_UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , A : Optional[Any] , A : Any=7 , A : str=4_00 , A : List[Any]=20_00 , A : List[Any]=1 , A : Any=0.0 , A : Tuple=1_60_00 , A : Optional[int]=True , A : str=True , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = min_seq_length
_UpperCAmelCase = max_seq_length
_UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase = feature_size
_UpperCAmelCase = padding_value
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = return_attention_mask
_UpperCAmelCase = do_normalize
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowerCamelCase ( self : str , A : Optional[Any]=False , A : Any=False) -> Optional[Any]:
"""simple docstring"""
def _flatten(A : Tuple):
return list(itertools.chain(*A))
if equal_length:
_UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
_UpperCAmelCase = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
_UpperCAmelCase = [np.asarray(A) for x in speech_inputs]
return speech_inputs
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = WavaVecaFeatureExtractor
def _lowerCamelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = WavaVecaFeatureExtractionTester(self)
def _lowerCamelCase ( self : str , A : str) -> Optional[int]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(A , axis=0) < 1E-3))
self.assertTrue(np.all(np.abs(np.var(A , axis=0) - 1) < 1E-3))
def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
_UpperCAmelCase = [np.asarray(A) for speech_input in speech_inputs]
# Test not batched input
_UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np').input_values
_UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(A , A , atol=1E-3))
# Test batched
_UpperCAmelCase = feat_extract(A , return_tensors='np').input_values
_UpperCAmelCase = feat_extract(A , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(A , A):
self.assertTrue(np.allclose(A , A , atol=1E-3))
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
_UpperCAmelCase = np.asarray(A)
_UpperCAmelCase = feat_extract(A , return_tensors='np').input_values
_UpperCAmelCase = feat_extract(A , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(A , A):
self.assertTrue(np.allclose(A , A , atol=1E-3))
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
_UpperCAmelCase = ['longest', 'max_length', 'do_not_pad']
_UpperCAmelCase = [None, 16_00, None]
for max_length, padding in zip(A , A):
_UpperCAmelCase = feat_extract(A , padding=A , max_length=A , return_tensors='np')
_UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self.assertTrue(input_values[0][8_00:].sum() < 1E-6)
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self.assertTrue(input_values[0][10_00:].sum() < 1E-6)
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def _lowerCamelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCAmelCase = range(8_00 , 14_00 , 2_00)
_UpperCAmelCase = [floats_list((1, x))[0] for x in lengths]
_UpperCAmelCase = ['longest', 'max_length', 'do_not_pad']
_UpperCAmelCase = [None, 16_00, None]
for max_length, padding in zip(A , A):
_UpperCAmelCase = feat_extract(A , max_length=A , padding=A)
_UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def _lowerCamelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
_UpperCAmelCase = feat_extract(
A , truncation=A , max_length=10_00 , padding='max_length' , return_tensors='np')
_UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
def _lowerCamelCase ( self : str) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
_UpperCAmelCase = feat_extract(
A , truncation=A , max_length=10_00 , padding='longest' , return_tensors='np')
_UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00))
_UpperCAmelCase = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
_UpperCAmelCase = feat_extract(
A , truncation=A , max_length=20_00 , padding='longest' , return_tensors='np')
_UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00))
@require_torch
def _lowerCamelCase ( self : Tuple) -> int:
"""simple docstring"""
import torch
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCAmelCase = np.random.rand(1_00).astype(np.floataa)
_UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
_UpperCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
@slow
@require_torch
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_UpperCAmelCase = WavaVecaConfig.from_pretrained(A)
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(A)
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer')
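# Added illustration of the zero-mean/unit-variance property the tests above assert
# (assumption: per-utterance normalization with the same 1e-7 variance floor the
# feature extractor applies).
import numpy as np

raw = np.random.rand(800).astype(np.float32)
normed = (raw - raw.mean()) / np.sqrt(raw.var() + 1e-7)
assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3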
| 290
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]) -> Any:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='utf-8' , check=A , )
assert hasattr(self , 'env')
def _lowerCamelCase ( self : Any , A : Tuple=1) -> List[str]:
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"{self.env.base_job_name}-single" , instance_count=A , instance_type=self.instance_type , debugger_hook_config=A , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , )
def _lowerCamelCase ( self : Dict , A : int) -> str:
"""simple docstring"""
TrainingJobAnalytics(A).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
def _lowerCamelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' , 99_99_99)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
assert all(t <= self.results['eval_loss'] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , 'w') as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , A)
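# Added sketch of the KPI-extraction pattern used above (assumption: a pandas
# DataFrame with metric_name/value columns, as TrainingJobAnalytics.dataframe() returns).
import pandas as pd

df = pd.DataFrame({"metric_name": ["eval_accuracy", "eval_loss"], "value": [0.71, 0.42]})
eval_accuracy = list(df[df.metric_name == "eval_accuracy"]["value"])
assert all(acc >= 0.6 for acc in eval_accuracy)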
| 290
| 1
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
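# Added programmatic sketch of the same flow (assumption: TensorFlow installed and a
# tiny public model chosen to keep the run fast; the kwargs below come from the public
# BenchmarkArguments API and should be checked against your transformers version).
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"], batch_sizes=[1], sequence_lengths=[8],
    inference=True, training=False,
)
results = TensorFlowBenchmark(args=args).run()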
| 11
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it
_A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
_A : Union[str, Any] = vocab_file
_A : int = False if not self.vocab_file else True
_A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "en_XX"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : str = src_lang
_A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Dict = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Any = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : int = self.convert_tokens_to_ids(__lowerCamelCase)
_A : int = []
_A : List[str] = [self.eos_token_id, self.cur_lang_code]
_A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : str = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase)
_A : List[Any] = []
_A : str = [self.eos_token_id, self.cur_lang_code]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : int = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
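# Added usage sketch (assumption: network access to the Hub; MBartTokenizerFast is the
# public name of the class above).
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief Says There Is No Plan to Stop War", return_tensors="pt")
print(batch["input_ids"][0][-2:])  # ends with </s> followed by the en_XX language code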
| 11
| 1
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_ (_SCREAMING_SNAKE_CASE ) ->Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ :Dict = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def make_linear_from_emb (_SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ :List[Any] = emb.weight.shape
lowerCAmelCase__ :Dict = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Dict = emb.weight.data
return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk (_SCREAMING_SNAKE_CASE ) ->Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ :Dict = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
lowerCAmelCase__ :List[Any] = mam_aaa['args'] or mam_aaa['cfg']['model']
lowerCAmelCase__ :Union[str, Any] = mam_aaa['model']
remove_ignore_keys_(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :int = state_dict['encoder.embed_tokens.weight'].shape[0]
lowerCAmelCase__ :int = MaMaaaConfig(
vocab_size=_SCREAMING_SNAKE_CASE , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
lowerCAmelCase__ :Union[str, Any] = state_dict['decoder.embed_tokens.weight']
lowerCAmelCase__ :Union[str, Any] = MaMaaaForConditionalGeneration(_SCREAMING_SNAKE_CASE )
model.model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
    parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
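    # Added invocation sketch (assumption: both paths below are placeholders for a local
    # fairseq checkpoint and an output directory):
    #   python convert_m2m100_checkpoint.py ./model.pt ./m2m100_converted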
| 361
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
lowerCAmelCase__ :Optional[int] = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
lowerCAmelCase__ :Any = token_dict['token']
lowerCAmelCase__ :int = Tokenizer(Unigram() )
lowerCAmelCase__ :Tuple = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
lowerCAmelCase__ :Any = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=__UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
lowerCAmelCase__ :List[str] = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = TemplateProcessing(
single=F"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
lowerCAmelCase__ :Optional[int] = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = 8_0_0_0 , __UpperCAmelCase = True , ):
'''simple docstring'''
lowerCAmelCase__ :int = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :int = [files]
self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase )
self.add_unk_id()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = 8_0_0_0 , __UpperCAmelCase = True , ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase )
self.add_unk_id()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = json.loads(self._tokenizer.to_str() )
lowerCAmelCase__ :List[str] = self.special_tokens['unk']['id']
lowerCAmelCase__ :Union[str, Any] = Tokenizer.from_str(json.dumps(__UpperCAmelCase ) )
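# Added sketch of the underlying `tokenizers` Unigram training this wrapper drives
# (assumption: a tiny demo corpus and vocab purely for illustration; real use needs
# far more data, and `unk_token` support should be checked against your tokenizers version).
from tokenizers import Tokenizer, trainers
from tokenizers.models import Unigram

tok = Tokenizer(Unigram())
trainer = trainers.UnigramTrainer(
    vocab_size=100, special_tokens=["<pad>", "</s>", "<unk>"], unk_token="<unk>"
)
tok.train_from_iterator(["hello world", "tokenizers are fast"], trainer=trainer)
print(tok.encode("hello world").tokens)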
| 254
| 0
|