| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    """simple docstring"""

    model_type = "mobilenet_v2"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
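# Usage sketch (added for illustration, not part of the original file): the
# restored class above matches the public MobileNetV2Config API in
# `transformers`, so a config can be built and tweaked like any other
# PretrainedConfig subclass.
#
#     from transformers import MobileNetV2Config
#
#     config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
#     assert config.model_type == "mobilenet_v2"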
| code_codestyle: 177 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "unispeech"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        """simple docstring"""
        return functools.reduce(operator.mul, self.conv_stride, 1)
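# Worked example (illustrative, not part of the original file): with the
# default conv_stride of (5, 2, 2, 2, 2, 2, 2), the property above reduces to
# 5 * 2**6 = 320, i.e. the feature encoder downsamples the raw waveform by a
# factor of 320 input samples per output frame.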
| style_context_codestyle: 177 | label: 1 |
def catalan_numbers(upper_limit: int) -> list[int]:
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0')

    # catalan_list[i] will hold the i-th Catalan number
    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i - 1
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
    print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print('\n********* Goodbye!! ************')
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print('Try another upper limit for the sequence: ', end='')
    except (NameError, ValueError):
        print('\n********* Invalid input, goodbye! ************\n')

    import doctest

    doctest.testmod()
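# Sanity check (added for illustration): the recurrence above reproduces the
# closed form C(n) = (2n)! / ((n + 1)! * n!), e.g.
#
#     >>> catalan_numbers(5)
#     [1, 1, 2, 5, 14, 42]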
| code_codestyle: 303 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| style_context_codestyle: 303 | label: 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| code_codestyle: 4 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name: str) -> SwinConfig:
    """simple docstring"""
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
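# Worked example (illustrative): for swin_name = "swin_tiny_patch4_window7_224"
# the split above yields ["swin", "tiny", "patch4", "window7", "224"], so
# model_size = "tiny", window_size = int("window7"[-1]) = 7 and img_size = 224.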
def rename_key(name: str) -> str:
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"

            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
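# Note on the qkv split above (added for clarity): timm stores the attention
# projections as one fused tensor, a (3 * dim, dim) weight and a (3 * dim,)
# bias. Rows [0, dim) are the query projection, [dim, 2 * dim) the key
# projection, and [2 * dim, 3 * dim) the value projection, which is exactly
# how the slices val[:dim], val[dim : dim * 2] and val[-dim:] carve it up.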
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """simple docstring"""
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f'Saving model {swin_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| style_context_codestyle: 178 | label: 0 |
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """simple docstring"""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """simple docstring"""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
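# Worked example (illustrative): solve(13, 2) returns 1, because the only way
# to write 13 as a sum of distinct squares of natural numbers is
# 13 = 2**2 + 3**2 = 4 + 9.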
| code_codestyle: 232 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| style_context_codestyle: 232 | label: 1 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| code_codestyle: 400 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
try:
snake_case_ = tempfile.mktemp()
with open(a__ , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , a__ )
snake_case_ = AlbertTokenizer.from_pretrained(a__ )
finally:
os.remove(a__ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , a__ )
snake_case_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase__ ( cls ) -> List[str]:
'''simple docstring'''
snake_case_ = TOKEN
HfFolder.save_token(a__ )
@classmethod
def lowerCAmelCase__ ( cls ) -> Any:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(a__ , "vocab.txt" )
with open(a__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case_ = BertTokenizer(a__ )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
snake_case_ = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a__ , repo_id="test-tokenizer" , push_to_hub=a__ , use_auth_token=self._token )
snake_case_ = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(a__ , "vocab.txt" )
with open(a__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case_ = BertTokenizer(a__ )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
snake_case_ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
a__ , repo_id="valid_org/test-tokenizer-org" , push_to_hub=a__ , use_auth_token=self._token )
snake_case_ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(a__ , "vocab.txt" )
with open(a__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case_ = CustomTokenizer(a__ )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case_ = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=a__ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(a__ , "vocab.txt" )
with open(a__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case_ = BertTokenizerFast.from_pretrained(a__ )
bert_tokenizer.save_pretrained(a__ )
snake_case_ = CustomTokenizerFast.from_pretrained(a__ )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case_ = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=a__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
snake_case_ = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' , use_fast=a__ , trust_remote_code=a__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class TrieTest(unittest.TestCase):
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = Trie()
snake_case_ = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(a__ , ["AB", "C"] )
| style_context_codestyle: 400 | label: 1 |
"""simple docstring"""
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """simple docstring"""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
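# Worked example (illustrative): peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) returns 5.
# The middle window is [4, 5, 4], whose centre beats both neighbours, so the
# function answers in a single call; skewed inputs recurse on one half,
# giving O(log n) calls overall.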
| code_codestyle: 51 |
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| style_context_codestyle: 51 | label: 1 |
'''simple docstring'''
def molarity_to_normality(nfactor, moles, volume) -> float:
    """simple docstring"""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume, moles, temperature) -> float:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure, moles, temperature) -> float:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure, moles, volume) -> float:
    """simple docstring"""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
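# Worked example (illustrative): by the ideal gas law PV = nRT with
# R = 0.0821 L*atm/(mol*K), 2 moles at 300 K in a 5 L vessel give
# moles_to_pressure(volume=5, moles=2, temperature=300)
#   = round((2 * 0.0821 * 300) / 5) = round(9.852) = 10 atm.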
| code_codestyle: 26 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    # member names follow diffusers' Flax schedulers; the exact numbering is
    # reconstructed and assumed to mirror the PyTorch KarrasDiffusionSchedulers enum
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """simple docstring"""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """simple docstring"""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """simple docstring"""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        """simple docstring"""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int, ...]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
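# Usage sketch (added for illustration): the helper above implements the
# squaredcos_cap_v2 ("Glide cosine") schedule. For example,
#
#     betas = betas_for_alpha_bar(1000)
#
# yields a length-1000 jnp.float32 array of betas, monotonically increasing
# and clipped at max_beta = 0.999.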
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state, original_samples, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state, sample, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| style_context_codestyle: 219 | label: 0 |
"""simple docstring"""
from math import pi
def arc_length(angle, radius):
    '''simple docstring'''
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
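# Worked example (illustrative): the call above computes the length of a 90
# degree arc of a circle with radius 10: 2 * pi * 10 * (90 / 360) = 5 * pi
# ≈ 15.7079.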
| code_codestyle: 194 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 194 | label: 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        """simple docstring"""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """simple docstring"""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| code_codestyle: 589 |
"""simple docstring"""
from string import ascii_uppercase

letter_to_index = {char: i for i, char in enumerate(ascii_uppercase)}
index_to_letter = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """simple docstring"""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """simple docstring"""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (letter_to_index[letter] - letter_to_index[key_new[i]]) % 26
            i += 1
            cipher_text += index_to_letter[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """simple docstring"""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (letter_to_index[letter] + letter_to_index[key_new[i]] + 26) % 26
            i += 1
            or_txt += index_to_letter[x]
    return or_txt


def main() -> None:
    """simple docstring"""
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
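# Worked example (illustrative): for plaintext letter 'T' (index 19) under key
# letter 'S' (index 18), encryption gives (19 - 18) % 26 = 1 -> 'B', and
# decryption recovers it via (1 + 18 + 26) % 26 = 19 -> 'T'.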
| style_context_codestyle: 589 | label: 1 |
"""simple docstring"""
# Imports
import numpy as np
class IndexCalculation:
    """simple docstring"""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
def __snake_case ( self : str , __UpperCAmelCase : Optional[Any]="" , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Any=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Tuple=None):
self.set_matricies(red=__UpperCAmelCase , green=__UpperCAmelCase , blue=__UpperCAmelCase , red_edge=__UpperCAmelCase , nir=__UpperCAmelCase)
a : Any = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!")
return False
def __snake_case ( self : Any):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def __snake_case ( self : List[Any]):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def __snake_case ( self : Dict):
return self.nir * (self.red / (self.green**2))
def __snake_case ( self : str):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def __snake_case ( self : int):
return (self.nir - self.red) / (self.nir + self.red)
def __snake_case ( self : Union[str, Any]):
return (self.nir - self.blue) / (self.nir + self.blue)
def __snake_case ( self : Union[str, Any]):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def __snake_case ( self : Optional[int]):
return (self.nir - self.green) / (self.nir + self.green)
def __snake_case ( self : List[str]):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def __snake_case ( self : Tuple):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def __snake_case ( self : Optional[Any]):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def __snake_case ( self : Any):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def __snake_case ( self : str , __UpperCAmelCase : Union[str, Any]=0.08 , __UpperCAmelCase : Union[str, Any]=1.22 , __UpperCAmelCase : Any=0.03):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def __snake_case ( self : Union[str, Any]):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def __snake_case ( self : List[str]):
return (self.nir / self.green) - 1
def __snake_case ( self : int):
return (self.nir / self.redEdge) - 1
def __snake_case ( self : Any):
return (self.red - self.blue) / self.red
def __snake_case ( self : Tuple):
a : List[str] = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def __snake_case ( self : Optional[Any]):
return self.nir - self.green
def __snake_case ( self : List[Any]):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def __snake_case ( self : Union[str, Any]):
a : str = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : List[Any]=0.16):
return (self.nir - self.green) / (self.nir + self.green + y)
def __snake_case ( self : Dict , __UpperCAmelCase : Optional[Any]=0.5):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def __snake_case ( self : str):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def __snake_case ( self : List[Any] , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : str=None):
return (self.nir - b) / (a * self.red)
def __snake_case ( self : str):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def __snake_case ( self : int):
return (self.red + self.green + self.blue) / 30.5
def __snake_case ( self : Optional[int]):
return self.nir / self.red
def __snake_case ( self : Any):
return (self.rvi() - 1) / (self.rvi() + 1)
def __snake_case ( self : str):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def __snake_case ( self : Union[str, Any]):
return self.green / (self.nir + self.red + self.green)
def __snake_case ( self : Optional[Any]):
return self.nir / (self.nir + self.red + self.green)
def __snake_case ( self : str):
return self.red / (self.nir + self.red + self.green)
def __snake_case ( self : int):
return (self.green - self.red) / (self.green + self.red)
def __snake_case ( self : Dict):
return (self.red - self.green) / (self.red + self.green)
def __snake_case ( self : List[str]):
a : Union[str, Any] = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
a : List[Any] = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def __snake_case ( self : List[Any]):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def __snake_case ( self : str):
return self.nir / self.red
def __snake_case ( self : Optional[Any]):
return (self.ndvi() + 0.5) ** (1 / 2)
def __snake_case ( self : Dict):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
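# Worked example (illustrative): the plain NDVI index defined above is
# (nir - red) / (nir + red); for a healthy-vegetation pixel with nir = 0.5
# and red = 0.1 it evaluates to 0.4 / 0.6 ≈ 0.667.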
| code_codestyle: 135 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
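# Usage sketch (added for illustration): the restored class above matches the
# public Speech2TextConfig API in `transformers`, e.g.
#
#     from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
#
#     config = Speech2TextConfig(encoder_layers=6, decoder_layers=3)
#     model = Speech2TextForConditionalGeneration(config)  # randomly initialised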
| style_context_codestyle: 135 | label: 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
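
# Worked example for the acceptance rule above: with change = -2 and
# current_temp = 100, the acceptance probability is e**(-2 / 100) ~= 0.98, so a
# slightly worse neighbor is almost always accepted early on; once the
# temperature has decayed to 1, the same move only survives with e**(-2) ~= 0.14.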
if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 467
|
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__lowerCamelCase = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('''SM_HP_MP_PARAMETERS''', '''{}''' )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('''SM_FRAMEWORK_PARAMS''', '''{}''' )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get('''sagemaker_mpi_enabled''', False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('''smdistributed''' ) is not None
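
# Hedged sketch (the JSON payloads below are illustrative assumptions, not taken
# from this file): SageMaker exposes these settings as JSON strings in the
# environment, e.g.
#     os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
#     os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'
# With both set and `smdistributed` installed, the check above returns True.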
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments( TrainingArguments ):
    mp_parameters: str = field(
        default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
    def __post_init__( self ):
super().__post_init__()
        warnings.warn(
            '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '''
            '''`TrainingArguments` instead.''' , FutureWarning , )
@cached_property
    def _setup_devices( self ) -> "torch.device":
logger.info('''PyTorch: setting up devices''' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'''torch.distributed process group is initialized, but local_rank == -1. '''
'''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' )
if self.no_cuda:
UpperCAmelCase__ :Union[str, Any] = torch.device('''cpu''' )
UpperCAmelCase__ :Dict = 0
elif is_sagemaker_model_parallel_available():
UpperCAmelCase__ :Dict = smp.local_rank()
UpperCAmelCase__ :List[str] = torch.device('''cuda''' , __lowerCamelCase )
UpperCAmelCase__ :Optional[int] = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta )
UpperCAmelCase__ :int = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) )
UpperCAmelCase__ :int = torch.device('''cuda''' , self.local_rank )
UpperCAmelCase__ :Union[str, Any] = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
UpperCAmelCase__ :str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
UpperCAmelCase__ :str = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta )
UpperCAmelCase__ :str = torch.device('''cuda''' , self.local_rank )
UpperCAmelCase__ :Union[str, Any] = 1
if device.type == "cuda":
torch.cuda.set_device(__lowerCamelCase )
return device
@property
    def world_size( self ):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
    def place_model_on_device( self ):
return not is_sagemaker_model_parallel_available()
@property
    def _no_sync_in_gradient_accumulation( self ):
return False
| 467
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig( PretrainedConfig ):
a_: List[Any] = 'gpt_neo'
a_: Tuple = ['past_key_values']
a_: Optional[int] = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : List[Any] , lowerCamelCase_ : Optional[Any]=5_0257 , lowerCamelCase_ : List[str]=2048 , lowerCamelCase_ : Optional[int]=2048 , lowerCamelCase_ : Optional[Any]=24 , lowerCamelCase_ : str=[[["global", "local"], 12]] , lowerCamelCase_ : int=16 , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Optional[Any]=256 , lowerCamelCase_ : Union[str, Any]="gelu_new" , lowerCamelCase_ : List[str]=0.0 , lowerCamelCase_ : List[Any]=0.0 , lowerCamelCase_ : int=0.0 , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Tuple=1e-5 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Dict=5_0256 , lowerCamelCase_ : Tuple=5_0256 , **lowerCamelCase_ : Optional[Any] , ):
_lowerCAmelCase =vocab_size
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_layers
_lowerCAmelCase =num_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =window_size
_lowerCAmelCase =activation_function
_lowerCAmelCase =resid_dropout
_lowerCAmelCase =embed_dropout
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =classifier_dropout
_lowerCAmelCase =layer_norm_epsilon
_lowerCAmelCase =initializer_range
_lowerCAmelCase =use_cache
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =attention_types
_lowerCAmelCase =self.expand_attention_types_params(lowerCamelCase_ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
F"`config.num_layers = {self.num_layers}`. "
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase_ : List[Any] ):
_lowerCAmelCase =[]
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
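
# Worked example: attention_types = [[["global", "local"], 12]] expands to
# ["global", "local", "global", "local", ...] with 24 entries -- one attention
# type per layer of a 24-layer model, alternating global and local attention.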
def custom_unfold( input , dimension , size , step ):
    '''Custom torch.Tensor.unfold implementation that can be traced for ONNX export.'''
    import torch

    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode="""floor""" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks( seq_length , window_size ):
    '''Largest divisor of seq_length below window_size, and the resulting number of blocks.'''
    import torch

    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode="""floor""" )
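
# Worked example: custom_get_block_length_and_num_blocks(12, 10) builds the
# candidates 1..9, keeps the divisors of 12 among them (1, 2, 3, 4, 6) and
# returns block length 6 and block count 2 (as tensors).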
class GPTNeoOnnxConfig( OnnxConfigWithPast ):
@property
def lowerCAmelCase__ ( self : Tuple ):
_lowerCAmelCase =OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction="""inputs""" )
_lowerCAmelCase ={0: """batch""", 1: """past_sequence + sequence"""}
else:
_lowerCAmelCase ={0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
return self._config.num_heads
def lowerCAmelCase__ ( self : str , lowerCamelCase_ : PreTrainedTokenizer , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[TensorType] = None , ):
_lowerCAmelCase =super(lowerCamelCase_ , self ).generate_dummy_inputs(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
# We need to order the input in the way they appears in the forward()
_lowerCAmelCase =OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase =common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_lowerCAmelCase =seqlen + 2
_lowerCAmelCase =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
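                # Illustrative sizes (assumed for the example): with batch = 2,
                # 16 heads and seqlen = 8 (so a past length of 10) and hidden
                # size 2048, each past key/value tensor below is zeros of
                # shape (2, 16, 10, 128).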
_lowerCAmelCase =[
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(self.num_layers )
]
_lowerCAmelCase =common_inputs["""attention_mask"""]
if self.use_past:
_lowerCAmelCase =ordered_inputs["""attention_mask"""].dtype
_lowerCAmelCase =torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
return ordered_inputs
@property
def lowerCAmelCase__ ( self : Any ):
return 13
| 713
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def snake_case_ ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Dict , lowercase__ : int ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def snake_case_ ( lowercase__ : int , lowercase__ : int , lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Any=True ):
'''simple docstring'''
model.train()
_lowerCAmelCase =model(lowercase__ )
_lowerCAmelCase =F.mse_loss(lowercase__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowercase__ )
def snake_case_ ( lowercase__ : Dict , lowercase__ : List[str]=False ):
'''simple docstring'''
set_seed(42 )
_lowerCAmelCase =RegressionModel()
_lowerCAmelCase =deepcopy(lowercase__ )
_lowerCAmelCase =RegressionDataset(length=80 )
_lowerCAmelCase =DataLoader(lowercase__ , batch_size=16 )
model.to(accelerator.device )
if sched:
_lowerCAmelCase =AdamW(params=model.parameters() , lr=1e-3 )
_lowerCAmelCase =AdamW(params=ddp_model.parameters() , lr=1e-3 )
        _lowerCAmelCase =LambdaLR(lowercase__ , lr_lambda=lambda epoch : epoch**0.6_5 )
        _lowerCAmelCase =LambdaLR(lowercase__ , lr_lambda=lambda epoch : epoch**0.6_5 )
# Make a copy of `model`
if sched:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
_lowerCAmelCase , _lowerCAmelCase =accelerator.prepare(lowercase__ , lowercase__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def snake_case_ ( lowercase__ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =get_training_setup(lowercase__ )
# Use a single batch
_lowerCAmelCase , _lowerCAmelCase =next(iter(lowercase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_lowerCAmelCase , _lowerCAmelCase =accelerator.gather((ddp_input, ddp_target) )
_lowerCAmelCase , _lowerCAmelCase =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase__ ):
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
# Sync grads
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
_lowerCAmelCase =ddp_input[torch.randperm(len(lowercase__ ) )]
def snake_case_ ( lowercase__ : List[str] ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =get_training_setup(lowercase__ )
# Use a single batch
_lowerCAmelCase , _lowerCAmelCase =next(iter(lowercase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_lowerCAmelCase , _lowerCAmelCase =accelerator.gather((ddp_input, ddp_target) )
_lowerCAmelCase , _lowerCAmelCase =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase__ ):
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
# Sync grads
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
_lowerCAmelCase =ddp_input[torch.randperm(len(lowercase__ ) )]
def snake_case_ ( lowercase__ : Optional[Any]=False , lowercase__ : List[str]=False ):
'''simple docstring'''
_lowerCAmelCase =Accelerator(
split_batches=lowercase__ , dispatch_batches=lowercase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =get_training_setup(lowercase__ )
for iteration, batch in enumerate(lowercase__ ):
_lowerCAmelCase , _lowerCAmelCase =batch.values()
# Gather the distributed inputs and targs for the base model
_lowerCAmelCase , _lowerCAmelCase =accelerator.gather((ddp_input, ddp_target) )
_lowerCAmelCase , _lowerCAmelCase =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowercase__ ):
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
_lowerCAmelCase =ddp_input[torch.randperm(len(lowercase__ ) )]
GradientState._reset_state()
def snake_case_ ( lowercase__ : int=False , lowercase__ : Dict=False ):
'''simple docstring'''
_lowerCAmelCase =Accelerator(
split_batches=lowercase__ , dispatch_batches=lowercase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =get_training_setup(lowercase__ , lowercase__ )
for iteration, batch in enumerate(lowercase__ ):
_lowerCAmelCase , _lowerCAmelCase =batch.values()
# Gather the distributed inputs and targs for the base model
_lowerCAmelCase , _lowerCAmelCase =accelerator.gather((ddp_input, ddp_target) )
_lowerCAmelCase , _lowerCAmelCase =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowercase__ ):
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
_lowerCAmelCase =(((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase__ ))
if accelerator.num_processes > 1:
check_model_parameters(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def snake_case_ ( ):
'''simple docstring'''
_lowerCAmelCase =Accelerator()
_lowerCAmelCase =RegressionDataset(length=80 )
_lowerCAmelCase =DataLoader(lowercase__ , batch_size=16 )
_lowerCAmelCase =RegressionDataset(length=96 )
_lowerCAmelCase =DataLoader(lowercase__ , batch_size=16 )
_lowerCAmelCase , _lowerCAmelCase =accelerator.prepare(lowercase__ , lowercase__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowercase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase__ )
if iteration < len(lowercase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowercase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase__ )
if batch_num < len(lowercase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def snake_case_ ( ):
'''simple docstring'''
_lowerCAmelCase =Accelerator()
_lowerCAmelCase =accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(lowercase__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(lowercase__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation(lowercase__ , lowercase__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation_with_opt_and_scheduler(lowercase__ , lowercase__ )
def snake_case_ ( lowercase__ : Tuple ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 149
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__snake_case :List[str] =random.Random()
if is_torch_available():
import torch
def lowerCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=1.0 , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : List[str]=None ) -> Optional[Any]:
'''simple docstring'''
if rng is None:
A = global_rng
A = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
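
# Worked example: floats_list((2, 3)) returns two lists of three floats, each
# drawn from [0, scale) via the module-level `global_rng` unless an explicit
# rng is passed in.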
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Any , __UpperCamelCase : Tuple , __UpperCamelCase : str=7 , __UpperCamelCase : Any=400 , __UpperCamelCase : int=2_000 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : Dict=16_000 , __UpperCamelCase : Any=True , __UpperCamelCase : List[str]=True , ) -> Union[str, Any]:
A = parent
A = batch_size
A = min_seq_length
A = max_seq_length
A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A = feature_size
A = padding_value
A = sampling_rate
A = return_attention_mask
A = do_normalize
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCamelCase ( self : Dict , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : List[Any]=False ) -> str:
def _flatten(__UpperCamelCase : Dict ):
return list(itertools.chain(*UpperCamelCase_ ) )
if equal_length:
A = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A = [np.asarray(UpperCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCAmelCase__ ( lowercase_ , unittest.TestCase ):
A_ : Tuple = ASTFeatureExtractor
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
A = ASTFeatureExtractionTester(self )
def __UpperCamelCase ( self : List[str] ) -> str:
A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
A = [np.asarray(UpperCamelCase_ ) for speech_input in speech_inputs]
# Test not batched input
A = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
A = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3 ) )
# Test batched
A = feat_extract(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='np' ).input_values
A = feat_extract(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A = np.asarray(UpperCamelCase_ )
A = feat_extract(UpperCamelCase_ , return_tensors='np' ).input_values
A = feat_extract(UpperCamelCase_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3 ) )
@require_torch
def __UpperCamelCase ( self : List[str] ) -> Tuple:
import torch
        A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        A = np.random.rand(100 ).astype(np.float32 )
        A = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            A = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            A = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
from datasets import load_dataset
A = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
        A = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
        # fmt: off
        A = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
A = self._load_datasamples(1 )
A = ASTFeatureExtractor()
A = feature_extractor(UpperCamelCase_ , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 1_024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , UpperCamelCase_ , atol=1e-4 ) )
| 106
|
def remove_duplicates( key: str ) -> str:
    """Remove duplicate alphabetic characters from a keyword (spaces are kept)."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map( key: str ) -> dict[str, str]:
    """Return a mapping from the plain alphabet to the keyword-mixed alphabet."""
    alphabet = [chr(i + 6_5 ) for i in range(2_6 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet ) , 2_6 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher( message: str , cipher_map: dict[str, str] ) -> str:
    """Encipher the message with the cipher map; unmapped characters pass through."""
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )


def decipher( message: str , cipher_map: dict[str, str] ) -> str:
    """Decipher the message by inverting the cipher map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )


def main() -> None:
    message = input("""Enter message to encode or decode: """ ).strip()
    key = input("""Enter keyword: """ ).strip()
    option = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
    try:
        func = {"""e""": encipher, """d""": decipher}[option]
    except KeyError:
        raise KeyError("""invalid input option""" )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
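
# Worked examples (illustrative, not part of the original script):
#     remove_duplicates("HELLO WORLD")  -> "HELO WRD"  (spaces kept, repeats dropped)
#     cmap = create_cipher_map("marvin")
#     decipher(encipher("HELLO", cmap), cmap) == "HELLO"  # round-trip on letters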
| 637
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 703
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : Optional[int] =FunnelTokenizer
UpperCamelCase__ : Any =FunnelTokenizerFast
UpperCamelCase__ : List[Any] =True
UpperCamelCase__ : Any =True
def __lowercase ( self ):
"""simple docstring"""
super().setUp()
__UpperCamelCase : str =[
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__UpperCamelCase : str =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase ( self , **lowerCamelCase__ ):
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def __lowercase ( self , **lowerCamelCase__ ):
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] ='UNwant\u00E9d,running'
__UpperCamelCase : Optional[Any] ='unwanted, running'
return input_text, output_text
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =self.tokenizer_class(self.vocab_file )
__UpperCamelCase : List[Any] =tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowerCamelCase__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
__UpperCamelCase : str =tokenizer('UNwant\u00E9d,running' )
__UpperCamelCase : str =len(inputs['input_ids'] ) - 1
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len )
__UpperCamelCase : List[Any] =tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' )
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
| 154
| 0
|
"""simple docstring"""
from __future__ import annotations
import time
snake_case = list[tuple[int, int]]
snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
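# The four moves in `delta` are (row, col) offsets: [-1, 0] is up, [0, -1] is
# left, [1, 0] is down and [0, 1] is right; diagonal moves are not allowed.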
class UpperCAmelCase :
def __init__( self : Any , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Node | None ):
"""simple docstring"""
_snake_case = pos_x
_snake_case = pos_y
_snake_case = (pos_y, pos_x)
_snake_case = goal_x
_snake_case = goal_y
_snake_case = parent
class UpperCAmelCase :
def __init__( self : List[str] , __lowerCamelCase : tuple[int, int] , __lowerCamelCase : tuple[int, int] ):
"""simple docstring"""
_snake_case = Node(start[1] , start[0] , goal[1] , goal[0] , __lowerCamelCase )
_snake_case = Node(goal[1] , goal[0] , goal[1] , goal[0] , __lowerCamelCase )
_snake_case = [self.start]
_snake_case = False
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
while self.node_queue:
_snake_case = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_snake_case = True
return self.retrace_path(__lowerCamelCase )
_snake_case = self.get_successors(__lowerCamelCase )
for node in successors:
self.node_queue.append(__lowerCamelCase )
if not self.reached:
return [self.start.pos]
return None
def __UpperCAmelCase ( self : str , __lowerCamelCase : Node ):
"""simple docstring"""
_snake_case = []
for action in delta:
_snake_case = parent.pos_x + action[1]
_snake_case = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__lowerCamelCase , __lowerCamelCase , self.target.pos_y , self.target.pos_x , __lowerCamelCase ) )
return successors
def __UpperCAmelCase ( self : str , __lowerCamelCase : Node | None ):
"""simple docstring"""
_snake_case = node
_snake_case = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_snake_case = current_node.parent
path.reverse()
return path
class UpperCAmelCase :
def __init__( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
_snake_case = BreadthFirstSearch(__lowerCamelCase , __lowerCamelCase )
_snake_case = BreadthFirstSearch(__lowerCamelCase , __lowerCamelCase )
_snake_case = False
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_snake_case = self.fwd_bfs.node_queue.pop(0 )
_snake_case = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_snake_case = True
return self.retrace_bidirectional_path(
__lowerCamelCase , __lowerCamelCase )
_snake_case = current_bwd_node
_snake_case = current_fwd_node
_snake_case = {
self.fwd_bfs: self.fwd_bfs.get_successors(__lowerCamelCase ),
self.bwd_bfs: self.bwd_bfs.get_successors(__lowerCamelCase ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__lowerCamelCase )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Node , __lowerCamelCase : Node ):
"""simple docstring"""
_snake_case = self.fwd_bfs.retrace_path(__lowerCamelCase )
_snake_case = self.bwd_bfs.retrace_path(__lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
_snake_case = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
snake_case = (0, 0)
snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
snake_case = time.time()
snake_case = BreadthFirstSearch(init, goal)
snake_case = bfs.search()
snake_case = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
snake_case = time.time()
snake_case = BidirectionalBreadthFirstSearch(init, goal)
snake_case = bd_bfs.search()
snake_case = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 103
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def _lowercase( ):
a__ ='http://images.cocodataset.org/val2017/000000039769.jpg'
a__ =Image.open(requests.get(__a , stream=__a ).raw )
return im
def _lowercase( __a : Optional[Any] ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def _lowercase( __a : int , __a : int , __a : Optional[Any] ):
a__ =dct.pop(__a )
a__ =val
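
# Worked example: rename_key(state_dict, "old", "new") pops state_dict["old"]
# and re-inserts its value under "new", mutating the dict in place.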
def _lowercase( __a : Optional[Any] ):
a__ =[]
for k in state_dict.keys():
a__ =k
if ".pwconv" in k:
a__ =k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
a__ =k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
a__ =k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
a__ =k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
a__ =k_new.split('.' )
if ls[2].isdigit():
a__ ='swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
a__ =k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def _lowercase( __a : Union[str, Any] , __a : int , __a : str ):
a__ =SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
a__ =1000
a__ ='huggingface/label-files'
a__ ='imagenet-1k-id2label.json'
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
a__ ={int(__a ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a__ =[3, 3, 6, 4]
a__ =[48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a__ =[3, 3, 9, 6]
a__ =[48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a__ =[4, 3, 10, 5]
a__ =[48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a__ =[4, 4, 12, 6]
a__ =[64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
a__ =torch.hub.load_state_dict_from_url(__a , map_location='cpu' , check_hash=__a )
else:
a__ =torch.load(__a , map_location='cpu' )
a__ =checkpoint
a__ =create_rename_keys(__a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__a , __a , __a )
# load HuggingFace model
a__ =SwiftFormerForImageClassification(__a ).eval()
hf_model.load_state_dict(__a )
# prepare test inputs
a__ =prepare_img()
a__ =ViTImageProcessor.from_pretrained('preprocessor_config' )
a__ =processor(images=__a , return_tensors='pt' )
# compare outputs from both models
a__ =get_expected_output(__a )
a__ =hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , __a , atol=1e-3 )
Path(__a ).mkdir(exist_ok=__a )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(__a )
if __name__ == "__main__":
_lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_lowerCAmelCase: Optional[int] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 187
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Tuple = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowercase ( _lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = "mobilenet_v1"
def __init__( self : Union[str, Any] , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Any=224 , __lowerCamelCase : Tuple=1.0 , __lowerCamelCase : Optional[int]=8 , __lowerCamelCase : str="relu6" , __lowerCamelCase : Any=True , __lowerCamelCase : Union[str, Any]=0.9_9_9 , __lowerCamelCase : List[Any]=0.0_2 , __lowerCamelCase : str=0.0_0_1 , **__lowerCamelCase : str , ) -> Dict:
'''simple docstring'''
super().__init__(**__lowerCamelCase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = depth_multiplier
lowerCamelCase__ = min_depth
lowerCamelCase__ = hidden_act
lowerCamelCase__ = tf_padding
lowerCamelCase__ = classifier_dropout_prob
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
class lowercase ( _lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = version.parse("1.11" )
@property
def a__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def a__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def a__ ( self : List[Any] ) -> float:
'''simple docstring'''
return 1E-4
| 187
| 1
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 101
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = list(__magic_name__ )
_lowerCAmelCase :Dict = list(__magic_name__ )
_lowerCAmelCase :Any = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count += 1
_lowerCAmelCase :Union[str, Any] = '_'
if count > 1:
return False
else:
return "".join(__magic_name__ )
def UpperCamelCase_( __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :int = []
while True:
_lowerCAmelCase :str = ['$'] * len(__magic_name__ )
_lowerCAmelCase :Optional[int] = []
for i in range(len(__magic_name__ ) ):
for j in range(i + 1 , len(__magic_name__ ) ):
_lowerCAmelCase :int = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCAmelCase :str = '*'
_lowerCAmelCase :Union[str, Any] = '*'
temp.append('X' )
for i in range(len(__magic_name__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__magic_name__ ) == 0:
return pi
_lowerCAmelCase :Any = list(set(__magic_name__ ) )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Sequence[float] ):
"""simple docstring"""
_lowerCAmelCase :str = []
for minterm in minterms:
_lowerCAmelCase :Any = ''
for _ in range(__magic_name__ ):
_lowerCAmelCase :Tuple = str(minterm % 2 ) + string
minterm //= 2
temp.append(__magic_name__ )
return temp
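
# Worked example: with 3 variables, minterm 5 becomes the string "101" -- bits
# are produced least-significant first (5 % 2, 2 % 2, 1 % 2) and prepended.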
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = list(__magic_name__ )
_lowerCAmelCase :List[Any] = list(__magic_name__ )
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase_( __magic_name__ : list[list[int]] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :List[str] = [0] * len(__magic_name__ )
for i in range(len(chart[0] ) ):
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Optional[Any] = -1
for j in range(len(__magic_name__ ) ):
if chart[j][i] == 1:
count += 1
_lowerCAmelCase :List[Any] = j
if count == 1:
_lowerCAmelCase :Dict = 1
for i in range(len(__magic_name__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__magic_name__ ) ):
_lowerCAmelCase :Dict = 0
temp.append(prime_implicants[i] )
while True:
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Any = -1
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = chart[i].count(1 )
if count_n > max_n:
_lowerCAmelCase :Optional[Any] = count_n
_lowerCAmelCase :Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = 0
def UpperCamelCase_( __magic_name__ : list[str] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = [[0 for x in range(len(__magic_name__ ) )] for x in range(len(__magic_name__ ) )]
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :Tuple = prime_implicants[i].count('_' )
for j in range(len(__magic_name__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , __magic_name__ ):
_lowerCAmelCase :str = 1
return chart
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Tuple = int(input('Enter the no. of variables\n' ) )
_lowerCAmelCase :Tuple = [
float(__magic_name__ )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_lowerCAmelCase :List[str] = decimal_to_binary(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Any = check(__magic_name__ )
print('Prime Implicants are:' )
print(__magic_name__ )
_lowerCAmelCase :List[Any] = prime_implicant_chart(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = selection(__magic_name__ , __magic_name__ )
print('Essential Prime Implicants are:' )
print(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 687
| 0
|
def solution( numerator: int = 1 , digit: int = 1_0_0_0 ) -> int:
    """Find the denominator up to `digit` whose unit fraction has the longest recurring cycle."""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
            now_divide = now_divide * 1_0 % divide_by_number
    return the_digit
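
# Worked example: solution(1, 10) returns 7, since 1/7 = 0.(142857) has the
# longest recurring cycle among denominators up to 10; the default solution()
# scans denominators up to 1000 and returns 983.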
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = '''▁'''
_lowerCamelCase : Optional[int] = {'''vocab_file''': '''prophetnet.tokenizer'''}
_lowerCamelCase : str = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': 5_12,
}
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = collections.OrderedDict()
with open(UpperCAmelCase__ , "r" , encoding="utf-8" ) as reader:
SCREAMING_SNAKE_CASE = reader.readlines()
for index, token in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = token.rstrip("\n" )
SCREAMING_SNAKE_CASE = index
return vocab
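
# Worked example: a vocab file containing the two lines "hello" and "world"
# yields OrderedDict([("hello", 0), ("world", 1)]).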
class lowercase ( a ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Any = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="[SEP]" , eos_token="[SEP]" , sep_token="[SEP]" , unk_token="[UNK]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10 ):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k )
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
            raise
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.sp_model ) + self.fairseq_offset

    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text: str ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token: str ) -> int:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token( self , index: int ) -> str:
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def convert_tokens_to_string( self , tokens: List[str] ) -> str:
        '''simple docstring'''
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
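# Hedged usage sketch for the tokenizer above (requires `sentencepiece` and
# access to the checkpoint; the methods used are the standard slow-tokenizer API):
#
#     tok = XLMProphetNetTokenizer.from_pretrained(
#         "microsoft/xprophetnet-large-wiki100-cased")
#     ids = tok("Hello world").input_ids        # piece ids + trailing [SEP]
#     print(tok.convert_ids_to_tokens(ids))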
| 647
| 0
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def analyze_directory( self , directory: Path , identifier: Union[str, None] = None , ignore_files: Union[List[str], None] = None , n_identifier: Union[str, List[str], None] = None , only_modules: bool = True , ) -> None:
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py' )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' , file )
            if only_modules:
                module_identifier = file.split('.' )[0]
                try:
                    module_identifier = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f"""{module_identifier} is not a module.""" )
            else:
                # `'..' / directory` works because `directory` is a Path (Path.__rtruediv__)
                result = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_files( self ):
        directory = Path('src/transformers' )
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )

    def test_tokenization_files( self ):
        directory = Path('src/transformers' )
        identifier = 'tokenization'
        self.analyze_directory(directory , identifier=identifier )

    def test_configuration_files( self ):
        directory = Path('src/transformers' )
        identifier = 'configuration'
        self.analyze_directory(directory , identifier=identifier )

    def test_remaining_files( self ):
        directory = Path('src/transformers' )
        n_identifier = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory , n_identifier=n_identifier )

    def test_doc_sources( self ):
        directory = Path('docs/source' )
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
| 383
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer

    def test_convert_token_and_id( self ):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 101_122 )

    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 101_122 )

    @require_torch
    def test_prepare_batch( self ):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3_018, 70_307, 91, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors='pt' )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result )

    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )

    @slow
    def test_tokenizer_integration( self ):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=sequences , )
| 383
| 1
|
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
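# Illustrative example of the key rewriting above (hypothetical key):
#   "image_encoder.patch_embed.proj.weight"
#     -> "vision_encoder.patch_embed.projection.weight"
# via the "image_encoder" and "patch_embed.proj" entries of KEYS_TO_MODIFY_MAPPING.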
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    # NOTE: pytorch_dump_folder and push_to_hub are accepted for CLI
    # compatibility but are unused in this snippet.
    checkpoint_path = hf_hub_download(model_hub_id, F"""checkpoints/{model_name}.pth""")
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_0_2_4, num_hidden_layers=2_4, num_attention_heads=1_6, global_attn_indexes=[5, 1_1, 1_7, 2_3], )
        config = SamConfig(
            vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_2_8_0, num_hidden_layers=3_2, num_attention_heads=1_6, global_attn_indexes=[7, 1_5, 2_3, 3_1], )
        config = SamConfig(
            vision_config=vision_config, )
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")
    img_url = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[4_0_0, 6_5_0]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4
        input_boxes = ((7_5, 2_7_5, 1_7_2_5, 8_5_0),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4
        # Test with 2 points and 1 image.
        input_points = [[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2
if __name__ == "__main__":
snake_case__ : int = argparse.ArgumentParser()
snake_case__ : List[Any] = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
snake_case__ : Optional[int] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 719
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
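# Toy illustration (not the real implementation) of what `_LazyModule` buys:
# submodules are imported only when one of their exported names is accessed,
# which keeps `import transformers` cheap even with torch models registered.
#
#     class _TinyLazyModule:
#         def __init__(self, import_structure):
#             self._import_structure = import_structure
#         def __getattr__(self, name):
#             import importlib
#             for module, names in self._import_structure.items():
#                 if name in names:
#                     submodule = importlib.import_module(f".{module}", __package__)
#                     return getattr(submodule, name)
#             raise AttributeError(name)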
| 655
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 563
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
UpperCAmelCase : str = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30522, type=int)
UpperCAmelCase : str = parser.parse_args()
logger.info(F"""Loading data from {args.data_file}""")
with open(args.data_file, """rb""") as fp:
        data = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
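    # Downstream sketch (hedged): in the distillation recipe these counts are
    # turned into MLM smoothing weights, roughly
    #
    #     token_probs = np.maximum(counts, 1) ** -alpha   # e.g. alpha = 0.7 (assumption)
    #     token_probs = token_probs / token_probs.sum()
    #
    # so rare tokens are masked more often than their raw frequency suggests.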
| 563
| 1
|
import json
import sys
def format_json_to_md(input_json_file , output_md_file ) -> None:
    """simple docstring"""
    with open(input_json_file , encoding="""utf-8""" ) as f:
        results = json.load(f )
    output_md = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("""/""" )[-1]
        output_md.append(f'### Benchmark: {benchmark_file_name}' )
        title = """| metric |"""
        lines = """|--------|"""
        value = """| new / old (diff) |"""
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["""new"""]
            old_val = metric_vals.get("""old""" , None )
            dif_val = metric_vals.get("""diff""" , None )
            val_str = f' {new_val:f}' if isinstance(new_val , (int, float) ) else """None"""
            if old_val is not None:
                val_str += f' / {old_val:f}' if isinstance(old_val , (int, float) ) else "None"
            if dif_val is not None:
                val_str += f' ({dif_val:f})' if isinstance(dif_val , (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("""</details>""" )
    with open(output_md_file , """w""" , encoding="""utf-8""" ) as f:
        f.writelines("""\n""".join(output_md ) )
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
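# Example of the expected input shape (hypothetical values): a JSON file like
#
#     {"benchmarks/array.json": {"load_time": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
#
# renders as one "### Benchmark: array.json" section whose table row reads
# "| metric | load_time |" over "| new / old (diff) | 1.200000 / 1.500000 (-0.300000) |".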
| 75
|
def encrypt(input_string: str , key: int ) -> str:
    """simple docstring"""
    temp_grid: list[list[str]] = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""" )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = ["""""".join(row ) for row in temp_grid]
    output_string = """""".join(grid )
    return output_string


def decrypt(input_string: str , key: int ) -> str:
    """simple docstring"""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""" )
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append("""*""" )
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = """"""  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string


def bruteforce(input_string: str ) -> dict[int, str]:
    """simple docstring"""
    results = {}
    for key_guess in range(1 , len(input_string ) ):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess )
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
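    # Roundtrip sanity check (relies only on encrypt/decrypt being inverses):
    ciphertext = encrypt("WEAREDISCOVEREDFLEEATONCE", 3)
    assert decrypt(ciphertext, 3) == "WEAREDISCOVEREDFLEEATONCE"
    assert bruteforce(ciphertext)[3] == "WEAREDISCOVEREDFLEEATONCE"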
| 75
| 1
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self , data: Any):
        """simple docstring"""
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        """simple docstring"""
        self.head = None

    def print_list(self):
        """simple docstring"""
        temp = self.head
        while temp is not None:
            print(temp.data , end=" ")
            temp = temp.next
        print()

    def push(self , new_data: Any):
        """simple docstring"""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self , node_data_1 , node_data_2):
        """simple docstring"""
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("""After swapping""")
    ll.print_list()
| 77
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2 , 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
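    # The recurrence implemented above, spelled out: starting from the single
    # all-unit-cells row (ways_number initialised to 1), each row of length n
    # adds ways_number[n - s - t] for every tile length t in {2, 3, 4} and
    # every admissible start offset s, i.e.
    #     ways[n] = 1 + sum_{t=2..4} sum_{s=0..n-t} ways[n - s - t]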
| 320
| 0
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        """simple docstring"""
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )

    def get_feature_extractor( self , **kwargs ):
        """simple docstring"""
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )

    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )

    def test_save_load_pretrained_default( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )

    def test_save_load_pretrained_additional_features( self ):
        """simple docstring"""
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )

    def test_feature_extractor( self ):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1_000) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors='np' )
        input_processor = processor(audios=raw_speech , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer( self ):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_tokenizer_decode( self ):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names( self ):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 264
|
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray ) -> float:
    '''simple docstring'''
    return np.dot(vector , vector )
class SVC:
    """simple docstring"""

    def __init__( self , *,
        regularization: float = np.inf , kernel: str = "linear" , gamma: float = 0.0 , ):
        """simple docstring"""
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('''rbf kernel requires gamma''' )
            if not isinstance(self.gamma , (float, int) ):
                raise ValueError('''gamma must be float or int''' )
            if not self.gamma > 0:
                raise ValueError('''gamma must be > 0''' )
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"""Unknown kernel: {kernel}"""
            raise ValueError(msg )

    def __linear( self , vector1: ndarray , vector2: ndarray ):
        """Linear kernel: plain dot product."""
        return np.dot(vector1 , vector2 )

    def __rbf( self , vector1: ndarray , vector2: ndarray ):
        """RBF kernel: exp(-gamma * ||v1 - v2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2 )) )

    def fit( self , observations: list , classes: ndarray ):
        """simple docstring"""
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes )

        def to_minimize(candidate: ndarray ) -> float:
            s = 0
            (n,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j] )
                    )
            return 1 / 2 * s - sum(candidate )

        ly_contraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_contraint] ).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j] )
        self.offset = s / n

    def predict( self , observation: ndarray ):
        """simple docstring"""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
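    # Hedged usage sketch for the SVC above (toy, linearly separable 1-D data;
    # the exact decision boundary depends on the optimizer's solution):
    xs = [np.array([0.0]), np.array([1.0]), np.array([3.0]), np.array([4.0])]
    ys = np.asarray([-1, -1, 1, 1])
    svc = SVC(kernel="linear")
    svc.fit(xs, ys)
    print(svc.predict(np.array([3.5])))   # expected: 1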
| 264
| 1
|
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    def test_set_level( self ):
        '''simple docstring'''
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(level_origin )

    def test_integration( self ):
        '''simple docstring'''
        level_origin = logging.get_verbosity()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart" )
        msg = "Testing 1, 2, 3"
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger ) as cl:
                logger.warning(msg )
            self.assertEqual(cl.out , msg + "\n" )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , "" )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , msg + "\n" )
        # restore to the original level
        logging.set_verbosity(level_origin )

    @mockenv(TRANSFORMERS_VERBOSITY="error" )
    def test_env_override( self ):
        '''simple docstring'''
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart" )
        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY" , None )
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error" )
    def test_env_invalid_override( self ):
        '''simple docstring'''
        transformers.utils.logging._reset_library_root_logger()
        root_logger = logging.logging.getLogger()
        with CaptureLogger(root_logger ) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart" )
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
        # no need to restore as nothing was changed

    def test_advisory_warnings( self ):
        '''simple docstring'''
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart" )
        msg = "Testing 1, 2, 3"
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , "" )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , msg + "\n" )


def test_set_progress_bar_enabled():
    '''simple docstring'''
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 111
|
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=30 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=10 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=0.6 , lowerCamelCase=None , ):
'''simple docstring'''
__A : Tuple = parent
__A : Union[str, Any] = batch_size
__A : List[Any] = image_size
__A : Union[str, Any] = patch_size
__A : List[Any] = num_channels
__A : Optional[Any] = is_training
__A : str = use_labels
__A : Tuple = hidden_size
__A : int = num_hidden_layers
__A : Dict = num_attention_heads
__A : List[Any] = intermediate_size
__A : Tuple = hidden_act
__A : Tuple = hidden_dropout_prob
__A : str = attention_probs_dropout_prob
__A : Optional[Any] = type_sequence_label_size
__A : Union[str, Any] = initializer_range
__A : Optional[Any] = mask_ratio
__A : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__A : Optional[int] = (image_size // patch_size) ** 2
__A : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : Tuple = None
if self.use_labels:
__A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ViTMAEModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__A : List[Any] = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__A : str = ViTMAEForPreTraining(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__A : Optional[Any] = model(lowerCamelCase )
__A : List[str] = (self.image_size // self.patch_size) ** 2
__A : List[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__A : List[str] = 1
__A : str = ViTMAEForPreTraining(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__A : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A : int = model(lowerCamelCase )
__A : Union[str, Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : List[str] = self.prepare_config_and_inputs()
__A ,__A ,__A : List[Any] = config_and_inputs
__A : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCamelCase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Union[str, Any] = ViTMAEModelTester(self )
__A : Tuple = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A ,__A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(lowerCamelCase )
__A : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Optional[Any] = [*signature.parameters.keys()]
__A : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
np.random.seed(2 )
__A : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
__A : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__A : Optional[Any] = torch.from_numpy(lowerCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__A : int = pt_noise
super().check_pt_tf_models(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__A : Tuple = outputs[0].cpu().numpy()
__A : List[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
__A : List[Any] = model_class.from_pretrained(lowerCamelCase )
model.to(lowerCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__A : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
# Make sure we don't have nans
__A : List[Any] = after_outputs[0].cpu().numpy()
__A : List[str] = 0
__A : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = ViTMAEModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _lowercase ():
'''simple docstring'''
__A : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
np.random.seed(2 )
__A : Dict = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(lowerCamelCase )
__A : str = self.default_image_processor
__A : List[Any] = prepare_img()
__A : Union[str, Any] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__A : Optional[Any] = ViTMAEConfig()
__A : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__A : Any = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
__A : str = model(**lowerCamelCase , noise=torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase ) )
# verify the logits
__A : Union[str, Any] = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__A : int = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase ) , atol=1E-4 ) )
| 111
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict , encoder_only=False ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('head' ):
            key = 'segformer.encoder.' + key
        if key.startswith('backbone' ):
            key = key.replace('backbone' , 'segformer.encoder' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed' ) + len('patch_embed' )]
            key = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(idx )-1}' )
        if "norm" in key:
            key = key.replace('norm' , 'layer_norm' )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
            key = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(idx )-1}' )
        if "layer_norm1" in key:
            key = key.replace('layer_norm1' , 'layer_norm_1' )
        if "layer_norm2" in key:
            key = key.replace('layer_norm2' , 'layer_norm_2' )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block' ) + len('block' )]
            key = key.replace(F'block{idx}' , F'block.{int(idx )-1}' )
        if "attn.q" in key:
            key = key.replace('attn.q' , 'attention.self.query' )
        if "attn.proj" in key:
            key = key.replace('attn.proj' , 'attention.output.dense' )
        if "attn" in key:
            key = key.replace('attn' , 'attention.self' )
        if "fc1" in key:
            key = key.replace('fc1' , 'dense1' )
        if "fc2" in key:
            key = key.replace('fc2' , 'dense2' )
        if "linear_pred" in key:
            key = key.replace('linear_pred' , 'classifier' )
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv' , 'linear_fuse' )
            key = key.replace('linear_fuse.bn' , 'batch_norm' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c' ) + len('linear_c' )]
            key = key.replace(F'linear_c{idx}' , F'linear_c.{int(idx )-1}' )
        if key.startswith('head' ):
            key = key.replace('head' , 'classifier' )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict , config ):
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' )
            kv_bias = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'segformer.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'segformer.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'segformer.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'segformer.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[
                config.hidden_sizes[i] :
            ]
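# The split above turns the original fused "kv" projection, whose weight has
# shape (2 * hidden_sizes[i], hidden), into separate key and value tensors of
# shape (hidden_sizes[i], hidden) each, matching HF SegFormer attention layout.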
def _snake_case ( ):
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # set expected_slice based on model name
    # ADE20k checkpoints
    if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.372, -12.787, -13.477], [-12.536, -14.194, -14.409], [-13.217, -14.888, -15.327]],
                [[-14.791, -17.122, -18.277], [-17.163, -19.192, -19.533], [-17.897, -19.991, -20.315]],
                [[0.76723, 0.41921, -0.077878], [0.47772, 0.0095557, -0.28082], [0.36032, -0.24826, -0.51168]],
            ]
        )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowercase = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
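
# Hedged example invocation (script name, checkpoint path and output folder are
# hypothetical placeholders, not files shipped with this script):
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer_b0_ade.pth \
#       --pytorch_dump_folder_path ./segformer-b0-hf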
| 718
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }

@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass

def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
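
# Note on the expected shapes above: ImageGPT-small quantizes every image to a
# 32x32 grid of color-cluster ids, so each image becomes a sequence of
# 32 * 32 = 1024 discrete tokens, which is why input_ids has length 1024.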
| 22
| 0
|
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
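
# For reference: `get_duration` comes from the local `utils` module, which is not shown
# here. Below is a minimal sketch of an equivalent timing decorator (an assumption about
# its behavior, not the actual implementation; the benchmark uses the imported version):
import functools
import timeit


def _get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        # return elapsed wall-clock seconds instead of the function's result
        return timeit.default_timer() - starttime

    return wrapper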

@get_duration
def read(dataset: datasets.Dataset, length: int):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]

def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 364
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")

def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
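
# Hedged example: with sample_rate=16_000 and max_length=20.0, any clip longer than
# 320_000 samples (20 seconds) is cropped to exactly that many samples starting at a
# random offset; shorter clips pass through unchanged.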

@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )

@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )

def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
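
# Hedged example invocation (dataset and output paths are illustrative, not prescriptive):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval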
| 364
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
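
# Minimal usage sketch (illustrative; requires Pillow to be installed):
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)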
| 702
|
"""Slowsort: a deliberately inefficient "multiply and surrender" sorting algorithm."""
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sorts sequence[start..end] (both inclusive) in place.

    >>> seq = [1, 6, 2, 5, 3, 4, 4, 5]; slowsort(seq); seq
    [1, 2, 3, 4, 4, 5, 5, 6]
    >>> seq = []; slowsort(seq); seq
    []
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
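
# Complexity note: slowsort's recurrence is T(n) = 2*T(n/2) + T(n - 1) + O(1),
# which is not polynomially bounded -- the algorithm is intentionally pessimal.
# Example: slowsort(seq := [4, 3, 5, 1, 2]) leaves seq == [1, 2, 3, 4, 5].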
| 312
| 0
|
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
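
# Illustration only: a smoke test for the watermarker above. The 512x512 size is an
# arbitrary example; any batch whose width is >= 256 gets encoded.
def _demo_apply_watermark() -> None:
    watermarker = StableDiffusionXLWatermarker()
    images = torch.zeros(1, 3, 512, 512)  # dummy batch of images in [-1, 1]
    watermarked = watermarker.apply_watermark(images)
    assert watermarked.shape == images.shape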
| 207
|
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                "input_ids": [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                "token_type_ids": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                "attention_mask": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
| 207
| 1
|
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def is_in_circle(_SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> bool:
_UpperCAmelCase = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_UpperCAmelCase = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_SCREAMING_SNAKE_CASE ) )
# The ratio of the area for circle to square is pi/4.
_UpperCAmelCase = proportion * 4
print(f'The estimated value of pi is {pi_estimate}' )
print(f'The numpy value of pi is {pi}' )
print(f'The total error is {abs(pi - pi_estimate )}' )
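
# Quick sanity check (assuming the function above): with many draws the in-circle
# proportion converges to pi/4 ~= 0.7853981, so the printed estimate approaches
# 3.14159...; e.g. pi_estimator(100_000) typically lands within roughly 0.01 of pi.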

def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Checks the Monte Carlo integrator against the known area under y = x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimates pi as the area under a quarter circle of radius 2."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
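
# Why this works: y = sqrt(4 - x^2) on [0, 2] traces a quarter circle of radius 2,
# whose area is (1/4) * pi * r^2 = (1/4) * pi * 4 = pi, so the integral equals pi exactly.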
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95
|
"""simple docstring"""
import operator
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : list | None = None ):
'''simple docstring'''
_UpperCAmelCase = operator.lt if reverse else operator.gt
_UpperCAmelCase = solution or []
if not arr:
return solution
_UpperCAmelCase = [arr.pop(0 )]
for i, item in enumerate(_SCREAMING_SNAKE_CASE ):
if _operator(_SCREAMING_SNAKE_CASE , sublist[-1] ):
sublist.append(_SCREAMING_SNAKE_CASE )
arr.pop(_SCREAMING_SNAKE_CASE )
# merging sublist into solution list
if not solution:
solution.extend(_SCREAMING_SNAKE_CASE )
else:
while sublist:
_UpperCAmelCase = sublist.pop(0 )
for i, xx in enumerate(_SCREAMING_SNAKE_CASE ):
if not _operator(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
solution.insert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
break
else:
solution.append(_SCREAMING_SNAKE_CASE )
strand_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
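
# Example trace for strand_sort([4, 3, 5, 1, 2]): the first strand extracted is
# [4, 5], leaving [3, 1, 2]; successive strands are merged into the running
# solution until the input is exhausted, yielding [1, 2, 3, 4, 5].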
| 95
| 1
|
def counting_sort(collection):
    """Pure implementation of counting sort algorithm in Python."""
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered
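
# Worked example for counting_sort([4, 3, 5, 1, 2]) (illustrative trace):
#   coll_min=1, coll_max=5 -> counting_arr counts [1, 1, 1, 1, 1]
#   prefix sums -> [1, 2, 3, 4, 5], so each value v lands at output index
#   counting_arr[v - coll_min] - 1, giving ordered == [1, 2, 3, 4, 5]
#   while preserving the relative order of equal keys.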

def counting_sort_string(string):
    """Counting-sort the characters of a string via their ordinals."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
| 86
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"

def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)

def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
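
# Note: `gather_for_metrics` differs from a plain `gather` in that it drops the
# duplicate samples a distributed sampler pads onto the last batch, so the
# concatenated logits/targets line up exactly with the dataset length.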

def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
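

def _gather_for_metrics_sketch():
    # Added illustration (not part of the original test script): a minimal,
    # single-process sketch of the property the tests above verify. Accelerate
    # pads the final batch by duplicating samples so every process gets equal
    # work; `gather_for_metrics` drops those duplicates again, so the gathered
    # tensor always has exactly `len(dataset)` rows.
    from accelerate import Accelerator
    from torch.utils.data import DataLoader

    accelerator = Accelerator()
    dataloader = accelerator.prepare(DataLoader(torch.arange(10), batch_size=4))
    gathered = [accelerator.gather_for_metrics(batch) for batch in dataloader]
    assert torch.cat(gathered).shape[0] == 10  # holds on any number of processes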
| 86
| 1
|
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 714
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=4 , ):
'''simple docstring'''
lowercase : int =parent
lowercase : List[str] =batch_size
lowercase : str =seq_length
lowercase : Optional[Any] =is_training
lowercase : Union[str, Any] =use_attention_mask
lowercase : Optional[Any] =use_token_type_ids
lowercase : Tuple =use_labels
lowercase : List[str] =vocab_size
lowercase : List[str] =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Any =num_attention_heads
lowercase : List[str] =intermediate_size
lowercase : Optional[Any] =hidden_act
lowercase : Dict =hidden_dropout_prob
lowercase : List[Any] =attention_probs_dropout_prob
lowercase : Optional[Any] =max_position_embeddings
lowercase : Tuple =type_vocab_size
lowercase : Optional[int] =type_sequence_label_size
lowercase : Optional[Any] =initializer_range
lowercase : Optional[int] =num_choices
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =None
if self.use_attention_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Tuple =None
if self.use_token_type_ids:
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : int =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
lowerCamelCase_ = True
lowerCamelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 88
| 0
|
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """Build and simulate a quantum full adder; returns the measurement counts."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 70
|
"""simple docstring"""
def rank_of_matrix(matrix: list[list[float]]) -> int:
    """Finds the rank of a matrix by row-reducing it in place."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
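
# Quick sanity check for rank_of_matrix (added illustration): the second row is
# twice the first, so only two rows are linearly independent.
example = [
    [1.0, 2.0, 3.0],
    [2.0, 4.0, 6.0],
    [1.0, 0.0, 1.0],
]
assert rank_of_matrix(example) == 2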
| 505
| 0
|
"""simple docstring"""
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node's prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert many words into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True if the word is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Delete a word from the tree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                return True

    def print_tree(self, height: int = 0) -> None:
        """Print the tree, one node per line."""
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
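

def radix_sketch() -> None:
    # Extra usage sketch (added illustration): shared prefixes are stored once,
    # and an internal prefix is not reported as a word unless it was inserted.
    tree = RadixNode()
    tree.insert_many("romane romanus romulus rubens ruber rubicon rubicundus".split())
    assert tree.find("rubicon")
    assert not tree.find("rub")  # "rub" is only a shared prefix, not a word
    tree.print_tree()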
| 258
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
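
# Added illustration only -- a stripped-down re-implementation of the lazy-import
# idea used above (the real `_LazyModule` lives in `transformers.utils`):
# attribute access triggers the deferred import of the defining submodule.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public attribute to the module that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(module_name), attr)  # deferred import
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


# e.g. _TinyLazyModule("demo", {"json": ["dumps"]}).dumps({"a": 1}) == '{"a": 1}'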
| 258
| 1
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
def __init__( self :Any ,__snake_case :int ,__snake_case :int=13 ,__snake_case :str=64 ,__snake_case :Optional[Any]=3 ,__snake_case :Tuple=[16, 48, 96] ,__snake_case :Optional[int]=[1, 3, 6] ,__snake_case :Tuple=[1, 2, 10] ,__snake_case :Tuple=[7, 3, 3] ,__snake_case :Optional[int]=[4, 2, 2] ,__snake_case :List[str]=[2, 1, 1] ,__snake_case :str=[2, 2, 2] ,__snake_case :List[str]=[False, False, True] ,__snake_case :Any=[0.0, 0.0, 0.0] ,__snake_case :Union[str, Any]=0.02 ,__snake_case :int=1E-12 ,__snake_case :Optional[int]=True ,__snake_case :Union[str, Any]=True ,__snake_case :Any=2 ,) -> List[Any]:
a__ = parent
a__ = batch_size
a__ = image_size
a__ = patch_sizes
a__ = patch_stride
a__ = patch_padding
a__ = is_training
a__ = use_labels
a__ = num_labels
a__ = num_channels
a__ = embed_dim
a__ = num_heads
a__ = stride_kv
a__ = depth
a__ = cls_token
a__ = attention_drop_rate
a__ = initializer_range
a__ = layer_norm_eps
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[int]:
a__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ = None
if self.use_labels:
# create a random int32 tensor of given shape
a__ = ids_tensor([self.batch_size] ,self.num_labels )
a__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__( self :str ) -> Tuple:
return CvtConfig(
image_size=self.image_size ,num_labels=self.num_labels ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,num_heads=self.num_heads ,patch_sizes=self.patch_sizes ,patch_padding=self.patch_padding ,patch_stride=self.patch_stride ,stride_kv=self.stride_kv ,depth=self.depth ,cls_token=self.cls_token ,attention_drop_rate=self.attention_drop_rate ,initializer_range=self.initializer_range ,)
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :List[str] ,__snake_case :Dict ) -> Any:
a__ = TFCvtModel(config=__snake_case )
a__ = model(__snake_case ,training=__snake_case )
a__ = (self.image_size, self.image_size)
a__ , a__ = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
a__ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
a__ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dim[-1], height, width) )
def lowerCamelCase__( self :int ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Optional[Any] ) -> Union[str, Any]:
a__ = self.num_labels
a__ = TFCvtForImageClassification(__snake_case )
a__ = model(__snake_case ,labels=__snake_case ,training=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCamelCase__( self :Tuple ) -> Any:
a__ = self.prepare_config_and_inputs()
a__ , a__ , a__ = config_and_inputs
a__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
UpperCAmelCase__ : List[str] = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : int = False
def lowerCamelCase__( self :Optional[int] ) -> Optional[Any]:
a__ = TFCvtModelTester(self )
a__ = TFCvtConfigTester(self ,config_class=__snake_case ,has_text_modality=__snake_case ,hidden_size=37 )
def lowerCamelCase__( self :str ) -> Optional[int]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='Cvt does not output attentions' )
def lowerCamelCase__( self :Tuple ) -> str:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowerCamelCase__( self :Any ) -> Dict:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowerCamelCase__( self :Any ) -> int:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 ,reason='TF does not support backprop for grouped convolutions on CPU.' ,)
def lowerCamelCase__( self :Union[str, Any] ) -> int:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 ,reason='TF does not support backprop for grouped convolutions on CPU.' ,)
@slow
def lowerCamelCase__( self :Optional[int] ) -> Optional[int]:
super().test_keras_fit()
@unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
def lowerCamelCase__( self :Union[str, Any] ) -> List[str]:
a__ = tf.keras.mixed_precision.Policy('mixed_float16' )
tf.keras.mixed_precision.set_global_policy(__snake_case )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('float32' )
def lowerCamelCase__( self :int ) -> Union[str, Any]:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
a__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ = [*signature.parameters.keys()]
a__ = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> Optional[int]:
def check_hidden_states_output(__snake_case :int ,__snake_case :int ,__snake_case :List[str] ):
a__ = model_class(__snake_case )
a__ = model(**self._prepare_for_class(__snake_case ,__snake_case ) )
a__ = outputs.hidden_states
a__ = len(self.model_tester.depth )
self.assertEqual(len(__snake_case ) ,__snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) ,[
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] ,)
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = True
check_hidden_states_output(__snake_case ,__snake_case ,__snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ = True
check_hidden_states_output(__snake_case ,__snake_case ,__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> List[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowerCamelCase__( self :List[str] ) -> Optional[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def lowerCamelCase__( self :Any ) -> Any:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = TFCvtModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
@cached_property
def lowerCamelCase__( self :str ) -> Optional[int]:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCamelCase__( self :Any ) -> Any:
a__ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
a__ = self.default_image_processor
a__ = prepare_img()
a__ = image_processor(images=__snake_case ,return_tensors='tf' )
# forward pass
a__ = model(**__snake_case )
# verify the logits
a__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape ,__snake_case )
a__ = tf.constant([0.92_85, 0.90_15, -0.31_50] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,__snake_case ,atol=1E-4 ) )
| 335
|
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: identity for positive inputs, alpha * (e^x - 1) otherwise."""
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))
if __name__ == "__main__":
import doctest
doctest.testmod()
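
# Quick demo of the activation above (added illustration): negative inputs are
# squashed toward -alpha, non-negative inputs pass through unchanged.
demo = exponential_linear_unit(np.array([-2.0, 0.0, 3.0]), alpha=1.0)
assert np.allclose(demo, [np.exp(-2.0) - 1.0, 0.0, 3.0])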
| 335
| 1
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
"""simple docstring"""
def __UpperCAmelCase ( self :Optional[Any] ):
lowercase = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(lowercase__ )
lowercase = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(lowercase__ , 'tokenizers' )
lowercase = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(lowercase__ , 'tensorflow_text' )
lowercase = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(lowercase__ , 'sentencepiece_and_tokenizers' )
lowercase = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(lowercase__ , 'sentencepiece_and_tensorflow_text' )
lowercase = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(lowercase__ , 'sentencepiece_and_tokenizers_and_vision' )
def __UpperCAmelCase ( self :Optional[int] ):
lowercase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , lowercase__ )
self.assertIn('tensorflow_text' , lowercase__ )
self.assertIn('sentencepiece_and_tokenizers' , lowercase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def __UpperCAmelCase ( self :Optional[int] ):
lowercase = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(lowercase__ , '\nCONSTANT = None\n' )
lowercase = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
lowercase__ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
lowercase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
lowercase = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(lowercase__ , lowercase__ )
def __UpperCAmelCase ( self :Optional[Any] ):
lowercase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
lowercase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , lowercase__ )
| 314
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
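
# Example query on the graphs above (added illustration): the cheapest E -> F
# route is E -> G -> F with total cost 2 + 1 = 3.
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3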
| 314
| 1
|
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 17
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
if "model" in orig_key:
UpperCAmelCase_ : Optional[int] = orig_key.replace('model.' , '' )
if "norm1" in orig_key:
UpperCAmelCase_ : Optional[Any] = orig_key.replace('norm1' , 'attention.output.LayerNorm' )
if "norm2" in orig_key:
UpperCAmelCase_ : List[str] = orig_key.replace('norm2' , 'output.LayerNorm' )
if "norm" in orig_key:
UpperCAmelCase_ : Dict = orig_key.replace('norm' , 'LayerNorm' )
if "transformer" in orig_key:
UpperCAmelCase_ : Any = orig_key.split('.' )[0].split('_' )[-1]
UpperCAmelCase_ : Optional[Any] = orig_key.replace(F"transformer_{layer_num}" , F"encoder.layer.{layer_num}" )
if "mha.attn" in orig_key:
UpperCAmelCase_ : List[str] = orig_key.replace('mha.attn' , 'attention.self' )
if "mha" in orig_key:
UpperCAmelCase_ : Union[str, Any] = orig_key.replace('mha' , 'attention' )
if "W_q" in orig_key:
UpperCAmelCase_ : Any = orig_key.replace('W_q' , 'self.query' )
if "W_k" in orig_key:
UpperCAmelCase_ : Tuple = orig_key.replace('W_k' , 'self.key' )
if "W_v" in orig_key:
UpperCAmelCase_ : List[str] = orig_key.replace('W_v' , 'self.value' )
if "ff1" in orig_key:
UpperCAmelCase_ : str = orig_key.replace('ff1' , 'intermediate.dense' )
if "ff2" in orig_key:
UpperCAmelCase_ : Dict = orig_key.replace('ff2' , 'output.dense' )
if "ff" in orig_key:
UpperCAmelCase_ : Optional[int] = orig_key.replace('ff' , 'output.dense' )
if "mlm_class" in orig_key:
UpperCAmelCase_ : Optional[Any] = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' )
if "mlm" in orig_key:
UpperCAmelCase_ : Union[str, Any] = orig_key.replace('mlm' , 'cls.predictions.transform' )
if "cls" not in orig_key:
UpperCAmelCase_ : List[Any] = 'yoso.' + orig_key
return orig_key
def lowercase__ ( __snake_case : str , __snake_case : int ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ : Any = orig_state_dict.pop(__snake_case )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
UpperCAmelCase_ : Union[str, Any] = val
UpperCAmelCase_ : List[Any] = orig_state_dict['cls.predictions.decoder.bias']
UpperCAmelCase_ : Tuple = torch.arange(__snake_case ).expand((1, -1) ) + 2
return orig_state_dict
def lowercase__ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = torch.load(__snake_case , map_location='cpu' )['model_state_dict']
UpperCAmelCase_ : Dict = YosoConfig.from_json_file(__snake_case )
UpperCAmelCase_ : str = YosoForMaskedLM(__snake_case )
UpperCAmelCase_ : Dict = convert_checkpoint_helper(config.max_position_embeddings , __snake_case )
print(model.load_state_dict(__snake_case ) )
model.eval()
model.save_pretrained(__snake_case )
print(F"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 406
| 0
|
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent):
        self.parent = parent
    def prepare_feat_extract_dict(self):
        return {}
def __snake_case ( ):
"""simple docstring"""
_lowerCAmelCase = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
_lowerCAmelCase = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] = MarkupLMFeatureExtractor if is_bsa_available() else None
    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
@property
def __lowerCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
return self.feature_extract_tester.prepare_feat_extract_dict()
def __lowerCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class()
# Test not batched input
_lowerCAmelCase = get_html_strings()[0]
_lowerCAmelCase = feature_extractor(lowerCAmelCase_ )
# fmt: off
_lowerCAmelCase = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
_lowerCAmelCase = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , lowerCAmelCase_ )
self.assertEqual(encoding.xpaths , lowerCAmelCase_ )
# Test batched
_lowerCAmelCase = get_html_strings()
_lowerCAmelCase = feature_extractor(lowerCAmelCase_ )
# fmt: off
_lowerCAmelCase = expected_nodes + [['My First Heading', 'My first paragraph.']]
_lowerCAmelCase = expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , lowerCAmelCase_ )
self.assertEqual(encoding.xpaths , lowerCAmelCase_ )
| 719
|
"""simple docstring"""
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums, built recursively."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    """Return all permutations of nums, generated by backtracking."""

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
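
# Consistency check between the two implementations (added illustration):
# both must enumerate exactly the same n! orderings.
assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
assert len(permute2([1, 2, 3, 4])) == 24  # 4! orderings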
| 491
| 0
|
from math import ceil
def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
print('Invalid entry - please enter a number')
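
# Cross-check against the closed form (added illustration): for odd n, the
# diagonal sum of an n x n spiral equals (4n^3 + 3n^2 + 8n - 9) / 6.
for check_n in (3, 5, 1001):
    assert solution(check_n) == (4 * check_n**3 + 3 * check_n**2 + 8 * check_n - 9) // 6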
| 86
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if "emb" in name:
lowercase_ = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
lowercase_ = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
lowercase_ = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
lowercase_ = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
lowercase_ = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
lowercase_ = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
lowercase_ = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
lowercase_ = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
lowercase_ = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
lowercase_ = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
lowercase_ = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = list(state_dict.keys() )
lowercase_ = {}
for key in keys:
lowercase_ = state_dict.pop(UpperCAmelCase__ )
lowercase_ = rename_keys(UpperCAmelCase__ )
if "in_proj_weight" in key:
# split fused qkv proj
lowercase_ = val[:hidden_size, :]
lowercase_ = val[hidden_size : 2 * hidden_size, :]
lowercase_ = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
lowercase_ = val
else:
lowercase_ = val
return state_dict, enc_dec_proj_state_dict
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if checkpoint == "small":
# default config values
lowercase_ = 1_0_2_4
lowercase_ = 2_4
lowercase_ = 1_6
elif checkpoint == "medium":
lowercase_ = 1_5_3_6
lowercase_ = 4_8
lowercase_ = 2_4
elif checkpoint == "large":
lowercase_ = 2_0_4_8
lowercase_ = 4_8
lowercase_ = 3_2
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
lowercase_ = MusicgenDecoderConfig(
hidden_size=UpperCAmelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCAmelCase__ , num_attention_heads=UpperCAmelCase__ , )
return config
@torch.no_grad()
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="cpu" ):
lowercase_ = MusicGen.get_pretrained(UpperCAmelCase__ , device=UpperCAmelCase__ )
lowercase_ = decoder_config_from_checkpoint(UpperCAmelCase__ )
lowercase_ = fairseq_model.lm.state_dict()
lowercase_ , lowercase_ = rename_state_dict(
UpperCAmelCase__ , hidden_size=decoder_config.hidden_size )
lowercase_ = TaEncoderModel.from_pretrained("""t5-base""" )
lowercase_ = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
lowercase_ = MusicgenForCausalLM(UpperCAmelCase__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
lowercase_ , lowercase_ = decoder.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(UpperCAmelCase__ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
lowercase_ = MusicgenForConditionalGeneration(text_encoder=UpperCAmelCase__ , audio_encoder=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCAmelCase__ )
# check we can do a forward pass
lowercase_ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
lowercase_ = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
lowercase_ = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ ).logits
if logits.shape != (8, 1, 2_0_4_8):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
lowercase_ = AutoTokenizer.from_pretrained("""t5-base""" )
lowercase_ = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
lowercase_ = MusicgenProcessor(feature_extractor=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
# set the appropriate bos/pad token ids
lowercase_ = 2_0_4_8
lowercase_ = 2_0_4_8
# set other default generation config params
lowercase_ = int(3_0 * audio_encoder.config.frame_rate )
lowercase_ = True
lowercase_ = 3.0
if pytorch_dump_folder is not None:
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(UpperCAmelCase__ )
processor.save_pretrained(UpperCAmelCase__ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(UpperCAmelCase__ )
processor.push_to_hub(UpperCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 412
| 0
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Start a fresh deque for empty slots, then push the new datum in front
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 721
|
def encrypt(input_string: str, key: int) -> str:
    """Place the characters of a string in a grid of height `key` in a zigzag
    formation, then read the grid row by row."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generate a template grid from the key, fill it with the ciphertext, then
    read it back in zigzag order."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
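
# Round-trip check for the zigzag cipher above (added illustration):
plaintext = "WEAREDISCOVEREDFLEEATONCE"
assert decrypt(encrypt(plaintext, 3), 3) == plaintext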
| 565
| 0
|
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
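
# The per-pixel Python loop above is slow for large images; an equivalent
# vectorized version (added sketch, same 8-bit colour assumption) is:
def convert_to_negative_fast(image):
    return 255 - image  # uint8 images: flips every channel value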
| 273
|
'''simple docstring'''
import operator as op
def solve(post_fix):
    """Evaluate a postfix expression, printing each step in tabular form."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | "
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 329
| 0
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCamelCase ( UpperCamelCase_ ):
__a = ["image_processor", "tokenizer"]
__a = "FlavaImageProcessor"
__a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__: List[Any]= None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: Optional[Any]= kwargs.pop('''feature_extractor''' )
SCREAMING_SNAKE_CASE__: str= image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.image_processor
def __call__( self , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = 0 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = True , lowerCAmelCase = None , **lowerCAmelCase , ) -> List[Any]:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
SCREAMING_SNAKE_CASE__: Any= self.tokenizer(
text=lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , stride=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_overflowing_tokens=lowerCAmelCase , return_special_tokens_mask=lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , return_length=lowerCAmelCase , verbose=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase , )
if images is not None:
SCREAMING_SNAKE_CASE__: Tuple= self.image_processor(
lowerCAmelCase , return_image_mask=lowerCAmelCase , return_codebook_pixels=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase , )
if text is not None and images is not None:
encoding.update(lowerCAmelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
def UpperCamelCase_ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> Dict:
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def UpperCamelCase_ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> Any:
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: List[str]= self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__: str= self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase , )
return self.image_processor_class
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase , )
return self.image_processor
| 107
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    # Numerically integrates |fnc| over [x_start, x_end] with the trapezoidal rule.
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
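# A quick sanity check (not part of the original script): the trapezoidal rule
# is exact for linear integrands, so for f(x) = x on [0, 1] the unsigned area
# comes out as exactly 0.5 regardless of the step count:
#   >>> trapezoidal_area(lambda x: x, 0, 1, 10)
#   0.5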
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
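    # A minimal sketch of the two compositions named above (assumption: the two
    # sets are first lifted into a 2-D fuzzy relation via an outer min, and
    # scikit-fuzzy's maxmin_composition / maxprod_composition are then applied):
    #   R = np.fmin(young.reshape(-1, 1), middle_aged.reshape(1, -1))
    #   max_min = fuzz.maxmin_composition(R, R)
    #   max_product = fuzz.maxprod_composition(R, R)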
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    """Fetch the quote of the day from the ZenQuotes API."""
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    """Fetch a random quote from the ZenQuotes API."""
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
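# Note (assumption about the upstream service, not guaranteed by this code):
# ZenQuotes returns a JSON list of quote objects, with fields such as "q"
# (quote text) and "a" (author), which is why both helpers return a list.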
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
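# A minimal sketch of how the mixin is consumed (assumption: names below are
# illustrative, mirroring diffusers' own per-block test classes):
#   class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = DownBlock2D  # the block under test
#       block_type = "down"        # selects the dummy input / output shape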
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
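# Example invocation (assumption: the script's file name is illustrative):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path bart.onnx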
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
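# This dummy-object pattern lets the library import successfully without the
# optional `torch`/`torchsde` backends installed: instantiating the class (or
# calling from_config/from_pretrained) raises a clear error via
# requires_backends instead of failing at import time.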
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # Fixed: `nn.BatchNormad` was a typo for `nn.BatchNorm2d`.
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 720
|
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
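# A minimal usage sketch (assumption: the scheduler class and repo id below
# are illustrative):
#   from diffusers import DDPMScheduler
#   scheduler = DDPMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
#   print(scheduler.compatibles)  # scheduler classes that can reuse this config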
| 191
| 0
|
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    # Write immediately, bypassing stdout buffering.
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    # Wrap the content in an ANSI color escape sequence.
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines, direction):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
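# Quick demo of the helpers (assumption: illustrative, ANSI-capable terminal):
#   writeColor("ok", 32, end="\n")  # prints "ok" in green
#   linebreak()                     # draws a full-width horizontal rule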
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        # These keys belong to this writer, not to pandas' DataFrame.to_sql.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch

        return written
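# A minimal round-trip sketch (assumption: table and database names are
# illustrative):
#   import sqlite3
#   from datasets import Dataset
#   con = sqlite3.connect("quickstart.db")
#   Dataset.from_dict({"a": [1, 2]}).to_sql("t", con)   # goes through SqlDatasetWriter
#   ds = Dataset.from_sql("SELECT * FROM t", con)       # goes through SqlDatasetReader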
| 309
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # Fixed: the original `if "nyu" or "midas" in checkpoint_url` was always
    # truthy because the non-empty string "nyu" short-circuits the test.
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
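# Example invocation (assumption: the script's file name is illustrative):
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large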
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
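# With _LazyModule, heavy submodules (the PyTorch/TF modeling files) are only
# imported on first attribute access, keeping the top-level import cheap; the
# TYPE_CHECKING branch above gives static analyzers the eager imports instead.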
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    # Split fused attention in_proj matrices into separate q/k/v projections.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
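# The fixture above is a short "eating spaghetti" clip stored as a numpy array
# of frames on the Hub; returning it as a list of frames matches what
# XCLIPProcessor expects for its `videos` argument.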
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
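
# Example invocation (a sketch, not part of the original script; the script name and
# output path are assumptions). `--model_name` must be one of the keys of
# `model_to_url` above; 8, 16 or 32 frames are inferred from the name.
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32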
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
                 unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
                 additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
                         cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
                         additional_special_tokens=additional_special_tokens,
                         sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
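
# Usage sketch (not part of the original file; assumes a local copy of the
# `camembert-base` sentencepiece model):
#
#   tokenizer = CamembertTokenizer("sentencepiece.bpe.model")
#   tokens = tokenizer.tokenize("J'aime le camembert")
#   ids = tokenizer.convert_tokens_to_ids(tokens)
#   ids = tokenizer.build_inputs_with_special_tokens(ids)  # adds <s> ... </s>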
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
def __init__( self : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : str=13 , __snake_case : Union[str, Any]=30 , __snake_case : Union[str, Any]=2 , __snake_case : Dict=3 , __snake_case : Optional[Any]=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=32 , __snake_case : Optional[int]=5 , __snake_case : Any=4 , __snake_case : int=37 , __snake_case : int="gelu" , __snake_case : Union[str, Any]=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : int=10 , __snake_case : Any=0.02 , __snake_case : List[str]=None , __snake_case : Tuple=2 , ):
'''simple docstring'''
_snake_case: Optional[Any] = parent
_snake_case: Tuple = batch_size
_snake_case: str = image_size
_snake_case: int = patch_size
_snake_case: Union[str, Any] = num_channels
_snake_case: Dict = is_training
_snake_case: Optional[Any] = use_labels
_snake_case: Optional[Any] = hidden_size
_snake_case: Tuple = num_hidden_layers
_snake_case: List[Any] = num_attention_heads
_snake_case: Union[str, Any] = intermediate_size
_snake_case: List[str] = hidden_act
_snake_case: Tuple = hidden_dropout_prob
_snake_case: List[Any] = attention_probs_dropout_prob
_snake_case: str = type_sequence_label_size
_snake_case: Any = initializer_range
_snake_case: str = scope
_snake_case: Union[str, Any] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case: Tuple = (image_size // patch_size) ** 2
_snake_case: List[str] = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case: List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case: List[str] = None
if self.use_labels:
_snake_case: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case: Union[str, Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : List[str] ):
'''simple docstring'''
_snake_case: Dict = ViTModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: Tuple = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : int ):
'''simple docstring'''
_snake_case: int = ViTForMaskedImageModeling(config=__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: Dict = model(__snake_case )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_snake_case: List[str] = 1
_snake_case: Tuple = ViTForMaskedImageModeling(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case: Union[str, Any] = model(__snake_case )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Dict ):
'''simple docstring'''
_snake_case: Optional[int] = self.type_sequence_label_size
_snake_case: Union[str, Any] = ViTForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: List[Any] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case: Tuple = 1
_snake_case: Optional[int] = ViTForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case: Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
_snake_case: Optional[int] = ViTModelTester(self )
_snake_case: Union[str, Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case , _snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case: Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case: Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
_snake_case , _snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case: int = model_class(__snake_case )
_snake_case: List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case: List[Any] = [*signature.parameters.keys()]
_snake_case: str = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
_snake_case: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case: Any = ViTModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def lowercase_ ( ) ->List[Any]:
_snake_case: Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
_snake_case: Optional[int] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(__snake_case )
_snake_case: Dict = self.default_image_processor
_snake_case: Optional[Any] = prepare_img()
_snake_case: List[str] = image_processor(images=__snake_case , return_tensors='pt' ).to(__snake_case )
# forward pass
with torch.no_grad():
_snake_case: Optional[int] = model(**__snake_case )
# verify the logits
_snake_case: Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __snake_case )
_snake_case: Dict = torch.tensor([-0.2_744, 0.8_215, -0.0_836] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case: str = ViTModel.from_pretrained('facebook/dino-vits8' ).to(__snake_case )
_snake_case: Any = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_80 )
_snake_case: Optional[int] = prepare_img()
_snake_case: Dict = image_processor(images=__snake_case , return_tensors='pt' )
_snake_case: Optional[Any] = inputs.pixel_values.to(__snake_case )
# forward pass
with torch.no_grad():
_snake_case: str = model(__snake_case , interpolate_pos_encoding=__snake_case )
# verify the logits
_snake_case: List[str] = torch.Size((1, 36_01, 3_84) )
self.assertEqual(outputs.last_hidden_state.shape , __snake_case )
_snake_case: Any = torch.tensor(
[[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __snake_case , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
_snake_case: Dict = self.default_image_processor
_snake_case: Any = prepare_img()
_snake_case: str = image_processor(images=__snake_case , return_tensors='pt' )
_snake_case: Any = inputs.pixel_values.to(__snake_case )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_snake_case: int = model(__snake_case )
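
# Minimal inference sketch mirroring the slow tests above (not part of the test file;
# downloads the public checkpoint from the hub):
#
#   processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       predicted_class = model(**inputs).logits.argmax(-1).item()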
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case: str = ModelForTest()
_snake_case: List[str] = ModelHook()
add_hook_to_module(__snake_case , __snake_case )
self.assertEqual(test_model._hf_hook , __snake_case )
self.assertTrue(hasattr(__snake_case , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , '_hf_hook' ) )
self.assertFalse(hasattr(__snake_case , '_old_forward' ) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
_snake_case: Optional[Any] = ModelForTest()
_snake_case: Union[str, Any] = ModelHook()
add_hook_to_module(__snake_case , __snake_case )
add_hook_to_module(__snake_case , __snake_case , append=__snake_case )
self.assertEqual(isinstance(test_model._hf_hook , __snake_case ) , __snake_case )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__snake_case , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , '_hf_hook' ) )
self.assertFalse(hasattr(__snake_case , '_old_forward' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case: Dict = ModelForTest()
_snake_case: Tuple = torch.randn(2 , 3 )
_snake_case: List[Any] = test_model(x + 1 )
_snake_case: int = test_model(x + 2 )
_snake_case: List[str] = PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
_snake_case: List[Any] = test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_snake_case: Any = PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
_snake_case: Dict = test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_snake_case: Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
_snake_case: int = test_model(__snake_case )
assert torch.allclose(__snake_case , __snake_case , atol=1e-5 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
_snake_case: int = ModelForTest()
_snake_case: str = torch.randn(2 , 3 )
_snake_case: int = test_model(__snake_case )
_snake_case: Union[str, Any] = PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
_snake_case: int = test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_snake_case: Tuple = PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
_snake_case: Optional[int] = test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_snake_case: List[str] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
_snake_case: List[Any] = test_model(__snake_case )
assert torch.allclose(__snake_case , output + 2 , atol=1e-5 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case: Optional[Any] = ModelForTest()
_snake_case: Dict = torch.randn(2 , 3 )
_snake_case: Tuple = test_model(__snake_case )
_snake_case: Tuple = PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
_snake_case: List[str] = test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_snake_case: Any = True
_snake_case: str = test_model(__snake_case )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
_snake_case: Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_snake_case: int = torch.randn(2 , 3 )
_snake_case: Tuple = model(__snake_case )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__snake_case , AlignDevicesHook(io_same_device=__snake_case ) )
_snake_case: Optional[Any] = torch.randn(2 , 3 ).to(0 )
_snake_case: Any = model(__snake_case )
self.assertEqual(output.device , torch.device(0 ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
_snake_case: Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_snake_case: Optional[int] = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_snake_case: Optional[int] = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
_snake_case: Tuple = torch.randn(2 , 3 )
_snake_case: Union[str, Any] = model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
_snake_case: Optional[Any] = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_snake_case: Optional[Any] = torch.randn(2 , 3 )
_snake_case: Union[str, Any] = model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
_snake_case: Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_snake_case: Optional[Any] = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_snake_case: List[str] = torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
_snake_case: Optional[Any] = torch.randn(2 , 3 )
_snake_case: List[str] = model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case , offload_buffers=__snake_case )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_snake_case: Any = torch.randn(2 , 3 )
_snake_case: List[Any] = model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
_snake_case: Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_snake_case: Tuple = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_snake_case: str = torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
_snake_case: Dict = torch.randn(2 , 3 )
_snake_case: Dict = model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() , offload_buffers=__snake_case , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_snake_case: List[Any] = torch.randn(2 , 3 )
_snake_case: List[str] = model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
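
# Sketch of the pattern these tests exercise (not part of the original file): attach an
# AlignDevicesHook with offloading, run a forward pass, then detach the hooks.
#
#   model = ModelForTest()
#   attach_align_device_hook(model, execution_device="cpu", offload=True)
#   _ = model(torch.randn(2, 3))        # weights are streamed in on demand
#   remove_hook_from_submodules(model)  # loads the real weights back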
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
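
# A vectorized alternative (a sketch, not part of the original script): NumPy negates
# the whole uint8 image at once, avoiding the slow per-pixel Python loop above.
#
#   neg = 255 - img  # `imread` returns a uint8 ndarray, so this stays in range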
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    """Return an MD5 hex digest of the image's raw bytes (used to compare outputs)."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : List[str] = DepthEstimationPipeline(model=_lowercase , image_processor=_lowercase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _lowercase )
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , _lowercase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = '''Intel/dpt-large'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''depth-estimation''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
SCREAMING_SNAKE_CASE__ : List[str] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
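
# Usage sketch of the pipeline under test (not part of the test file; downloads the
# Intel/dpt-large checkpoint from the hub):
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is a tensor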
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue with a fixed capacity, backed by a doubly linked list."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
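
# Usage example (a sketch, not part of the original module):
#
#   queue = CircularQueueLinkedList(initial_capacity=3)
#   queue.enqueue("a")
#   queue.enqueue("b")
#   assert queue.first() == "a"
#   assert queue.dequeue() == "a"
#   assert queue.dequeue() == "b"
#   queue.dequeue()  # raises Exception("Empty Queue")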
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """Split the dataset dictionary into features and target."""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # California housing dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
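
# Optional extension (a sketch, not part of the original script): the root mean squared
# error is often easier to interpret because it is in the target's own units.
#
#   rmse = mean_squared_error(y_test, predictions) ** 0.5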
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavaveca"] = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wavaveca"] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wavaveca"] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
a__ : List[Any] = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the roberta_prelayernorm checkpoint weights to our RobertaPreLayerNorm structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM']
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin'))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.'):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.') :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
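
# Example invocation (a sketch, not part of the original script; paths are assumptions):
#
#   python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm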
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because PyTorch Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f'''Conversion successful. Model saved under {pytorch_dump_folder_path}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
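
# Example invocation (a sketch, not part of the original script; paths are assumptions):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa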
"""simple docstring"""
def perfect_cube(n: int) -> bool:
    """Check whether n is a perfect cube."""
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
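
# Note: `n ** (1 / 3)` goes through floating point, so the exact equality above can
# fail for valid cubes (343 ** (1 / 3) evaluates to 6.999...). A rounding-based
# variant (a sketch, not part of the original file) is more robust:
def perfect_cube_rounded(n: int) -> bool:
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)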
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace relation: c = sqrt(K / rho), with bulk modulus K and density rho."""
    if density <= 0:
        raise ValueError("""Impossible fluid density""")
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
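
# Worked example (a sketch, not part of the original file), using textbook values for
# water: density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa.
#
#   speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)   # ~1467.8 m/s
#   speed_of_sound_in_a_fluid(density=1030, bulk_modulus=2.34e9)  # ~1507.3 m/s (sea water)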
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self, image_processor, tokenizer) -> None:
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None,
                 max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None,
                 return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False,
                 return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None,
                 **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
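
# Usage sketch (not part of the original file; the checkpoint name is an assumption):
#
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   batch = processor(images=image, text="a photo of a cat", return_tensors="pt")
#   # `batch` combines input_ids/attention_mask from the tokenizer with
#   # pixel_values/pixel_mask from the image processor.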
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
UpperCamelCase_ : Tuple = EfficientNetConfig()
UpperCamelCase_ : Optional[int] = CONFIG_MAP[model_name]["""hidden_dim"""]
UpperCamelCase_ : Tuple = CONFIG_MAP[model_name]["""width_coef"""]
UpperCamelCase_ : str = CONFIG_MAP[model_name]["""depth_coef"""]
UpperCamelCase_ : Optional[int] = CONFIG_MAP[model_name]["""image_size"""]
UpperCamelCase_ : Tuple = CONFIG_MAP[model_name]["""dropout_rate"""]
UpperCamelCase_ : List[Any] = CONFIG_MAP[model_name]["""dw_padding"""]
UpperCamelCase_ : Dict = """huggingface/label-files"""
UpperCamelCase_ : Tuple = """imagenet-1k-id2label.json"""
UpperCamelCase_ : List[Any] = 1000
UpperCamelCase_ : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
UpperCamelCase_ : int = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase_ : int = idalabel
UpperCamelCase_ : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( ):
UpperCamelCase_ : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCamelCase_ : str = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int ):
UpperCamelCase_ : str = CONFIG_MAP[model_name]["""image_size"""]
UpperCamelCase_ : List[str] = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] , do_center_crop=_SCREAMING_SNAKE_CASE , )
return preprocessor
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[int] ):
UpperCamelCase_ : List[str] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
UpperCamelCase_ : Optional[Any] = sorted(set(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ : str = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : Any = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )}
UpperCamelCase_ : Optional[Any] = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
UpperCamelCase_ : Tuple = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
UpperCamelCase_ : Optional[int] = {}
for item in rename_keys:
if item[0] in original_param_names:
UpperCamelCase_ : List[str] = """efficientnet.""" + item[1]
UpperCamelCase_ : str = """classifier.weight"""
UpperCamelCase_ : Optional[int] = """classifier.bias"""
return key_mapping
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] ):
for key, value in tf_params.items():
if "normalization" in key:
continue
UpperCamelCase_ : Tuple = key_mapping[key]
if "_conv" in key and "kernel" in key:
UpperCamelCase_ : List[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
UpperCamelCase_ : str = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
UpperCamelCase_ : List[str] = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) )
else:
UpperCamelCase_ : List[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE )
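# A minimal sketch of the layout conversion replace_params() performs above, with
# hypothetical shapes: TensorFlow stores conv kernels as (H, W, C_in, C_out) and
# depthwise kernels as (H, W, C_in, multiplier), while PyTorch expects
# (C_out, C_in, H, W).
def _demo_kernel_layout_conversion():
    tf_conv = np.zeros((3, 3, 16, 32), dtype=np.float32)  # (H, W, C_in, C_out)
    pt_conv = torch.from_numpy(tf_conv).permute(3, 2, 0, 1)
    assert pt_conv.shape == (32, 16, 3, 3)  # (C_out, C_in, H, W)
    tf_dw = np.zeros((3, 3, 16, 1), dtype=np.float32)  # (H, W, C_in, multiplier)
    pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)
    assert pt_dw.shape == (16, 1, 3, 3)  # one filter per input channel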
@torch.no_grad()
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ):
UpperCamelCase_ : Optional[int] = model_classes[model_name](
        include_top=True , weights="""imagenet""" , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation="""softmax""" , )
UpperCamelCase_ : str = original_model.trainable_variables
UpperCamelCase_ : Optional[Any] = original_model.non_trainable_variables
UpperCamelCase_ : Optional[Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
UpperCamelCase_ : int = param.numpy()
UpperCamelCase_ : Optional[int] = list(tf_params.keys() )
# Load HuggingFace model
UpperCamelCase_ : Optional[int] = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : Dict = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
UpperCamelCase_ : List[Any] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
UpperCamelCase_ : Optional[Any] = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
UpperCamelCase_ : Optional[Any] = convert_image_processor(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : Tuple = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
UpperCamelCase_ : List[Any] = hf_model(**_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : Tuple = outputs.logits.detach().numpy()
# Original model inference
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : int = CONFIG_MAP[model_name]["""image_size"""]
UpperCamelCase_ : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
UpperCamelCase_ : Tuple = image.img_to_array(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : List[Any] = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 )
UpperCamelCase_ : Optional[int] = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
UpperCamelCase_ : Tuple = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
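# Usage sketch for loading the converted checkpoint back; "hf_model" matches the
# default --pytorch_dump_folder_path above, everything else here is an illustrative
# assumption rather than part of the conversion script itself.
def _demo_load_converted_model(folder="hf_model"):
    model = EfficientNetForImageClassification.from_pretrained(folder)
    preprocessor = EfficientNetImageProcessor.from_pretrained(folder)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]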
| 138
|
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
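# Usage sketch: estimating the dominant eigenpair of a small symmetric matrix.
# The matrix and starting vector below are illustrative, not taken from the tests.
def _demo_power_iteration():
    a = np.array([[2.0, 1.0], [1.0, 3.0]])
    v0 = np.array([1.0, 0.0])
    eigen_value, eigen_vector = power_iteration(a, v0)
    # The dominant eigenvalue of [[2, 1], [1, 3]] is (5 + sqrt(5)) / 2 ≈ 3.618.
    assert abs(eigen_value - (5 + 5 ** 0.5) / 2) < 1e-6
    return eigen_value, eigen_vector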
| 138
| 1
|
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
A = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : Dict) -> Optional[int]:
'''simple docstring'''
_lowercase : List[str] = {}
state_dict.pop('pixel_mean' , lowerCAmelCase__)
state_dict.pop('pixel_std' , lowerCAmelCase__)
_lowercase : Optional[int] = r'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_lowercase : int = key.replace(lowerCAmelCase__ , lowerCAmelCase__)
if re.match(lowerCAmelCase__ , lowerCAmelCase__):
_lowercase : Any = int(re.match(lowerCAmelCase__ , lowerCAmelCase__).group(2))
if layer_nb == 0:
_lowercase : Optional[int] = key.replace('layers.0' , 'proj_in')
elif layer_nb == 1:
_lowercase : str = key.replace('layers.1' , 'layers.0')
elif layer_nb == 2:
_lowercase : str = key.replace('layers.2' , 'proj_out')
_lowercase : Optional[int] = value
_lowercase : Dict = model_state_dict[
'prompt_encoder.shared_embedding.positional_embedding'
]
return model_state_dict
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str="ybelkada/segment-anything") -> List[Any]:
'''simple docstring'''
_lowercase : Optional[int] = hf_hub_download(lowerCAmelCase__ , F'''checkpoints/{model_name}.pth''')
if "sam_vit_b" in model_name:
_lowercase : int = SamConfig()
elif "sam_vit_l" in model_name:
_lowercase : str = SamVisionConfig(
            hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
_lowercase : List[Any] = SamConfig(
vision_config=lowerCAmelCase__ , )
elif "sam_vit_h" in model_name:
_lowercase : Dict = SamVisionConfig(
            hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
_lowercase : Union[str, Any] = SamConfig(
vision_config=lowerCAmelCase__ , )
_lowercase : Tuple = torch.load(lowerCAmelCase__ , map_location='cpu')
_lowercase : Dict = replace_keys(lowerCAmelCase__)
_lowercase : Tuple = SamImageProcessor()
_lowercase : Tuple = SamProcessor(image_processor=lowerCAmelCase__)
_lowercase : List[Any] = SamModel(lowerCAmelCase__)
hf_model.load_state_dict(lowerCAmelCase__)
_lowercase : Optional[Any] = hf_model.to('cuda')
_lowercase : int = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
_lowercase : Optional[Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__).raw).convert('RGB')
    _lowercase : Dict = [[[400, 650]]]
    _lowercase : Any = [[1]]
_lowercase : Tuple = processor(images=np.array(lowerCAmelCase__) , return_tensors='pt').to('cuda')
with torch.no_grad():
_lowercase : int = hf_model(**lowerCAmelCase__)
_lowercase : Optional[Any] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668
_lowercase : Union[str, Any] = processor(
images=np.array(lowerCAmelCase__) , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , return_tensors='pt').to('cuda')
with torch.no_grad():
_lowercase : Union[str, Any] = hf_model(**lowerCAmelCase__)
_lowercase : int = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9712603092193604
    _lowercase : Dict = ((75, 275, 1725, 850),)
_lowercase : Optional[Any] = processor(images=np.array(lowerCAmelCase__) , input_boxes=lowerCAmelCase__ , return_tensors='pt').to('cuda')
with torch.no_grad():
_lowercase : Dict = hf_model(**lowerCAmelCase__)
_lowercase : Tuple = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.8686015605926514
# Test with 2 points and 1 image.
    _lowercase : Dict = [[[400, 650], [800, 650]]]
_lowercase : Dict = [[1, 1]]
_lowercase : Optional[Any] = processor(
images=np.array(lowerCAmelCase__) , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , return_tensors='pt').to('cuda')
with torch.no_grad():
_lowercase : int = hf_model(**lowerCAmelCase__)
_lowercase : str = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
A = argparse.ArgumentParser()
A = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
A = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
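# Usage sketch: running a hub-hosted SAM checkpoint with a point prompt. The
# checkpoint name is a real model id; the coordinates and image are illustrative.
def _demo_sam_point_prompt():
    model = SamModel.from_pretrained("facebook/sam-vit-base")
    processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    raw_image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    inputs = processor(raw_image, input_points=[[[450, 600]]], return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    masks = processor.image_processor.post_process_masks(
        outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()
    )
    return masks[0].shape, outputs.iou_scores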
| 125
|
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( SchedulerCommonTest ):
'''simple docstring'''
lowerCAmelCase__ : Dict = (PNDMScheduler,)
lowerCAmelCase__ : Tuple = (("num_inference_steps", 50),)
def _lowerCamelCase ( self : Tuple ,**UpperCamelCase : Any ) -> Dict:
_lowercase : Tuple = {
'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**UpperCamelCase )
return config
def _lowerCamelCase ( self : Optional[int] ,UpperCamelCase : Dict=0 ,**UpperCamelCase : List[str] ) -> Any:
_lowercase : Optional[Any] = dict(self.forward_default_kwargs )
_lowercase : Tuple = kwargs.pop('num_inference_steps' ,UpperCamelCase )
_lowercase : Tuple = self.dummy_sample
_lowercase : Optional[int] = 0.1 * sample
        _lowercase : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowercase : str = self.get_scheduler_config(**UpperCamelCase )
_lowercase : List[Any] = scheduler_class(**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residuals
_lowercase : str = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase )
_lowercase : List[str] = scheduler_class.from_pretrained(UpperCamelCase )
new_scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residuals
_lowercase : Any = dummy_past_residuals[:]
_lowercase : Dict = scheduler.step_prk(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample
_lowercase : Optional[Any] = new_scheduler.step_prk(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_lowercase : Union[str, Any] = scheduler.step_plms(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample
_lowercase : Optional[Any] = new_scheduler.step_plms(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowerCamelCase ( self : str ) -> List[Any]:
pass
def _lowerCamelCase ( self : Dict ,UpperCamelCase : List[Any]=0 ,**UpperCamelCase : List[Any] ) -> List[Any]:
_lowercase : int = dict(self.forward_default_kwargs )
_lowercase : Any = kwargs.pop('num_inference_steps' ,UpperCamelCase )
_lowercase : List[str] = self.dummy_sample
_lowercase : Dict = 0.1 * sample
        _lowercase : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowercase : int = self.get_scheduler_config()
_lowercase : Union[str, Any] = scheduler_class(**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_lowercase : int = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase )
_lowercase : Union[str, Any] = scheduler_class.from_pretrained(UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_lowercase : Any = dummy_past_residuals[:]
_lowercase : Dict = scheduler.step_prk(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample
_lowercase : List[Any] = new_scheduler.step_prk(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_lowercase : int = scheduler.step_plms(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample
_lowercase : Tuple = new_scheduler.step_plms(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowerCamelCase ( self : Optional[int] ,**UpperCamelCase : Any ) -> List[Any]:
_lowercase : Dict = self.scheduler_classes[0]
_lowercase : Union[str, Any] = self.get_scheduler_config(**UpperCamelCase )
_lowercase : Optional[int] = scheduler_class(**UpperCamelCase )
_lowercase : Dict = 10
_lowercase : str = self.dummy_model()
_lowercase : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
_lowercase : Any = model(UpperCamelCase ,UpperCamelCase )
_lowercase : Union[str, Any] = scheduler.step_prk(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_lowercase : Optional[Any] = model(UpperCamelCase ,UpperCamelCase )
_lowercase : Tuple = scheduler.step_plms(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ).prev_sample
return sample
def _lowerCamelCase ( self : Optional[Any] ) -> List[str]:
_lowercase : Union[str, Any] = dict(self.forward_default_kwargs )
_lowercase : List[str] = kwargs.pop('num_inference_steps' ,UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_lowercase : str = self.get_scheduler_config()
_lowercase : Dict = scheduler_class(**UpperCamelCase )
_lowercase : int = self.dummy_sample
_lowercase : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase ,'set_timesteps' ):
scheduler.set_timesteps(UpperCamelCase )
elif num_inference_steps is not None and not hasattr(UpperCamelCase ,'set_timesteps' ):
_lowercase : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
            _lowercase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_lowercase : List[Any] = dummy_past_residuals[:]
_lowercase : List[str] = scheduler.step_prk(UpperCamelCase ,0 ,UpperCamelCase ,**UpperCamelCase ).prev_sample
_lowercase : List[str] = scheduler.step_prk(UpperCamelCase ,1 ,UpperCamelCase ,**UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
_lowercase : str = scheduler.step_plms(UpperCamelCase ,0 ,UpperCamelCase ,**UpperCamelCase ).prev_sample
_lowercase : Any = scheduler.step_plms(UpperCamelCase ,1 ,UpperCamelCase ,**UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def _lowerCamelCase ( self : Optional[int] ) -> List[str]:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase )
def _lowerCamelCase ( self : Union[str, Any] ) -> int:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCamelCase )
_lowercase : Tuple = self.scheduler_classes[0]
_lowercase : List[str] = self.get_scheduler_config(steps_offset=1 )
_lowercase : Any = scheduler_class(**UpperCamelCase )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
def _lowerCamelCase ( self : Any ) -> Optional[int]:
        for beta_start, beta_end in zip([0.0001, 0.001] ,[0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCamelCase ,beta_end=UpperCamelCase )
def _lowerCamelCase ( self : Any ) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase )
def _lowerCamelCase ( self : List[Any] ) -> List[str]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase )
def _lowerCamelCase ( self : Optional[Any] ) -> Any:
for t in [1, 5, 10]:
self.check_over_forward(time_step=UpperCamelCase )
def _lowerCamelCase ( self : Dict ) -> Union[str, Any]:
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase )
def _lowerCamelCase ( self : Union[str, Any] ) -> Optional[int]:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
_lowercase : Dict = 27
for scheduler_class in self.scheduler_classes:
_lowercase : List[Any] = self.dummy_sample
_lowercase : List[str] = 0.1 * sample
_lowercase : Union[str, Any] = self.get_scheduler_config()
_lowercase : Any = scheduler_class(**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_lowercase : Optional[int] = scheduler.step_prk(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ).prev_sample
def _lowerCamelCase ( self : Dict ) -> Dict:
with self.assertRaises(UpperCamelCase ):
_lowercase : Optional[int] = self.scheduler_classes[0]
_lowercase : str = self.get_scheduler_config()
_lowercase : Tuple = scheduler_class(**UpperCamelCase )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def _lowerCamelCase ( self : Optional[int] ) -> int:
_lowercase : Any = self.full_loop()
_lowercase : Optional[Any] = torch.sum(torch.abs(UpperCamelCase ) )
_lowercase : Any = torch.mean(torch.abs(UpperCamelCase ) )
        assert abs(result_sum.item() - 198.1318 ) < 1e-2
        assert abs(result_mean.item() - 0.2580 ) < 1e-3
def _lowerCamelCase ( self : Any ) -> Union[str, Any]:
_lowercase : Tuple = self.full_loop(prediction_type='v_prediction' )
_lowercase : Optional[Any] = torch.sum(torch.abs(UpperCamelCase ) )
_lowercase : int = torch.mean(torch.abs(UpperCamelCase ) )
        assert abs(result_sum.item() - 67.3986 ) < 1e-2
        assert abs(result_mean.item() - 0.0878 ) < 1e-3
def _lowerCamelCase ( self : List[Any] ) -> str:
# We specify different beta, so that the first alpha is 0.99
        _lowercase : List[Any] = self.full_loop(set_alpha_to_one=True ,beta_start=0.01 )
_lowercase : List[Any] = torch.sum(torch.abs(UpperCamelCase ) )
_lowercase : Any = torch.mean(torch.abs(UpperCamelCase ) )
        assert abs(result_sum.item() - 230.0399 ) < 1e-2
        assert abs(result_mean.item() - 0.2995 ) < 1e-3
def _lowerCamelCase ( self : Any ) -> Optional[int]:
# We specify different beta, so that the first alpha is 0.99
        _lowercase : Union[str, Any] = self.full_loop(set_alpha_to_one=False ,beta_start=0.01 )
_lowercase : List[Any] = torch.sum(torch.abs(UpperCamelCase ) )
_lowercase : List[Any] = torch.mean(torch.abs(UpperCamelCase ) )
        assert abs(result_sum.item() - 186.9482 ) < 1e-2
        assert abs(result_mean.item() - 0.2434 ) < 1e-3
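# Usage sketch: the sampling loop these tests exercise, written out directly with
# illustrative shapes; scheduler.step() dispatches to step_prk()/step_plms() internally.
def _demo_pndm_sampling_loop():
    scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # stand-in for a real denoising model
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample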
| 125
| 1
|
"""simple docstring"""
def permute(nums) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result
def permute2(nums) -> list[list[int]]:
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack
    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
    import doctest
    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
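# Cross-check sketch (not part of the original module): both functions should agree
# with itertools.permutations up to ordering.
def _demo_check_against_itertools():
    from itertools import permutations
    nums = [1, 2, 3]
    expected = sorted(list(p) for p in permutations(nums))
    assert sorted(permute(list(nums))) == expected
    assert sorted(permute2(list(nums))) == expected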
| 615
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any]=7 , _SCREAMING_SNAKE_CASE: int=400 , _SCREAMING_SNAKE_CASE: List[str]=2000 , _SCREAMING_SNAKE_CASE: Optional[Any]=24 , _SCREAMING_SNAKE_CASE: Dict=24 , _SCREAMING_SNAKE_CASE: Optional[int]=0.0 , _SCREAMING_SNAKE_CASE: Any=16000 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = parent
__lowerCAmelCase : str = batch_size
__lowerCAmelCase : List[str] = min_seq_length
__lowerCAmelCase : Any = max_seq_length
__lowerCAmelCase : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCAmelCase : Union[str, Any] = feature_size
__lowerCAmelCase : int = num_mel_bins
__lowerCAmelCase : Optional[Any] = padding_value
__lowerCAmelCase : List[Any] = sampling_rate
__lowerCAmelCase : Dict = return_attention_mask
__lowerCAmelCase : int = do_normalize
def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[int]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: int=False) -> Optional[int]:
"""simple docstring"""
def _flatten(_SCREAMING_SNAKE_CASE: Optional[Any]):
return list(itertools.chain(*_SCREAMING_SNAKE_CASE))
if equal_length:
__lowerCAmelCase : Tuple = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
__lowerCAmelCase : List[Any] = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
__lowerCAmelCase : Any = [np.asarray(_SCREAMING_SNAKE_CASE) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
'''simple docstring'''
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Tuple:
"""simple docstring"""
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any]) -> str:
"""simple docstring"""
self.assertTrue(np.all(np.mean(_SCREAMING_SNAKE_CASE , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(_SCREAMING_SNAKE_CASE , axis=0) - 1) < 1e-3))
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase : Tuple = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : Optional[int] = [np.asarray(_SCREAMING_SNAKE_CASE) for speech_input in speech_inputs]
# Test feature size
__lowerCAmelCase : Optional[int] = feature_extractor(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="np").input_features
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
# Test not batched input
__lowerCAmelCase : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="np").input_features
__lowerCAmelCase : Any = feature_extractor(np_speech_inputs[0] , return_tensors="np").input_features
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3))
# Test batched
__lowerCAmelCase : Tuple = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="np").input_features
__lowerCAmelCase : Tuple = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="np").input_features
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3))
# Test 2-D numpy arrays are batched.
__lowerCAmelCase : List[str] = [floats_list((1, x))[0] for x in (800, 800, 800)]
__lowerCAmelCase : Dict = np.asarray(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="np").input_features
__lowerCAmelCase : Optional[Any] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="np").input_features
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3))
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : Tuple = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : Optional[Any] = ["longest", "max_length", "do_not_pad"]
__lowerCAmelCase : str = [None, 16, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : str = feature_extractor(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = inputs.input_features
__lowerCAmelCase : Optional[Any] = inputs.attention_mask
__lowerCAmelCase : Tuple = [np.sum(_SCREAMING_SNAKE_CASE) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : Any = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : Dict = ["longest", "max_length", "do_not_pad"]
__lowerCAmelCase : List[Any] = [None, 16, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : List[str] = feature_extractor(
_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="np" , return_attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = inputs.input_features
__lowerCAmelCase : Dict = inputs.attention_mask
__lowerCAmelCase : Dict = [np.sum(_SCREAMING_SNAKE_CASE) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
def _SCREAMING_SNAKE_CASE ( self: str) -> Any:
"""simple docstring"""
__lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : str = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : str = feature_extractor(
_SCREAMING_SNAKE_CASE , padding="max_length" , max_length=4 , truncation=_SCREAMING_SNAKE_CASE , return_tensors="np" , return_attention_mask=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[str] = inputs.input_features
__lowerCAmelCase : Dict = inputs.attention_mask
__lowerCAmelCase : Any = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1])
self._check_zero_mean_unit_variance(input_features[2])
def _SCREAMING_SNAKE_CASE ( self: int) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : List[Any] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : Any = feature_extractor(
_SCREAMING_SNAKE_CASE , padding="longest" , max_length=4 , truncation=_SCREAMING_SNAKE_CASE , return_tensors="np" , return_attention_mask=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[str] = inputs.input_features
__lowerCAmelCase : Optional[Any] = inputs.attention_mask
__lowerCAmelCase : Optional[Any] = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24))
__lowerCAmelCase : List[str] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
__lowerCAmelCase : List[str] = feature_extractor(
_SCREAMING_SNAKE_CASE , padding="longest" , max_length=16 , truncation=_SCREAMING_SNAKE_CASE , return_tensors="np" , return_attention_mask=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[Any] = inputs.input_features
__lowerCAmelCase : Optional[int] = inputs.attention_mask
__lowerCAmelCase : str = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24))
def _SCREAMING_SNAKE_CASE ( self: Any) -> Dict:
"""simple docstring"""
import torch
__lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        __lowerCAmelCase : List[str] = np.random.rand(100 , 32).astype(np.float32)
        __lowerCAmelCase : List[Any] = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            __lowerCAmelCase : Any = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            __lowerCAmelCase : List[Any] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[Any]) -> Optional[Any]:
"""simple docstring"""
from datasets import load_dataset
__lowerCAmelCase : Any = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation")
# automatic decoding with librispeech
__lowerCAmelCase : List[Any] = ds.sort("id").select(range(_SCREAMING_SNAKE_CASE))[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Any:
"""simple docstring"""
__lowerCAmelCase : str = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
])
# fmt: on
__lowerCAmelCase : str = self._load_datasamples(1)
__lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCAmelCase : List[str] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="pt").input_features
        self.assertEqual(input_features.shape , (1, 584, 24))
self.assertTrue(np.allclose(input_features[0, 0, :30] , _SCREAMING_SNAKE_CASE , atol=1e-4))
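# Usage sketch: extracting log-mel features outside the test harness. The hub id is
# a real checkpoint; the silent waveform is an illustrative stand-in for audio.
def _demo_feature_extraction():
    feature_extractor = Speech2TextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
    waveform = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz
    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
    return inputs.input_features.shape  # (batch, frames, num_mel_bins)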
| 615
| 1
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 * 4 , _SCREAMING_SNAKE_CASE=32 * 6 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=32 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : str = batch_size
SCREAMING_SNAKE_CASE_ : List[str] = is_training
SCREAMING_SNAKE_CASE_ : Tuple = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ : Any = num_queries
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE_ : Dict = min_size
SCREAMING_SNAKE_CASE_ : Any = max_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = mask_feature_size
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE ) > 0.5
).float()
SCREAMING_SNAKE_CASE_ : Any = (torch.rand((self.batch_size, self.num_labels) , device=_SCREAMING_SNAKE_CASE ) > 0.5).long()
SCREAMING_SNAKE_CASE_ : Tuple = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase ( self ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output.encoder_hidden_states
SCREAMING_SNAKE_CASE_ : str = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE_ : int = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , config.decoder_config.decoder_layers )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[Any] = MaskFormerModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE_ : str = model(pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = model(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = MaskFormerForInstanceSegmentation(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
def comm_check_on_output(_SCREAMING_SNAKE_CASE ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = model(_SCREAMING_SNAKE_CASE )
comm_check_on_output(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = model(
pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE )
comm_check_on_output(_SCREAMING_SNAKE_CASE )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
SCREAMING_SNAKE_CASE : Optional[int] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Any = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = MaskFormerModelTester(self )
SCREAMING_SNAKE_CASE_ : int = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_SCREAMING_SNAKE_CASE )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Any = model_class(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
SCREAMING_SNAKE_CASE_ : str = MaskFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE_ : Any = {
'pixel_values': torch.randn((2, 3, *size) , device=_SCREAMING_SNAKE_CASE ),
'mask_labels': torch.randn((2, 10, *size) , device=_SCREAMING_SNAKE_CASE ),
'class_labels': torch.zeros(2 , 10 , device=_SCREAMING_SNAKE_CASE ).long(),
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = model(**_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = model(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE_ : List[Any] = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : Dict = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
SCREAMING_SNAKE_CASE_ : List[Any] = model(_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
SCREAMING_SNAKE_CASE_ : Tuple = model(_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
SCREAMING_SNAKE_CASE_ : int = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ : int = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class _A ( unittest.TestCase):
@cached_property
def UpperCAmelCase ( self ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = self.default_image_processor
SCREAMING_SNAKE_CASE_ : str = prepare_img()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
        inputs_shape = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 800, 1088) )
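        # 800 x 1088: the 640x480 COCO fixture is resized so its shorter side is 800
        # (giving 800x1066), then padded up to the next multiple of 32.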
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(_SCREAMING_SNAKE_CASE )
.eval()
)
SCREAMING_SNAKE_CASE_ : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE_ : int = prepare_img()
SCREAMING_SNAKE_CASE_ : Dict = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
        inputs_shape = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(**_SCREAMING_SNAKE_CASE )
# masks_queries_logits
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE_ : List[str] = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
# class_queries_logits
SCREAMING_SNAKE_CASE_ : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(_SCREAMING_SNAKE_CASE )
.eval()
)
SCREAMING_SNAKE_CASE_ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE_ : str = prepare_img()
SCREAMING_SNAKE_CASE_ : List[str] = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
        inputs_shape = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**_SCREAMING_SNAKE_CASE )
# masks_queries_logits
SCREAMING_SNAKE_CASE_ : str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE_ : str = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
# class_queries_logits
SCREAMING_SNAKE_CASE_ : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(_SCREAMING_SNAKE_CASE )
.eval()
)
SCREAMING_SNAKE_CASE_ : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE_ : int = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ : List[Any] = inputs['pixel_values'].to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = [el.to(_SCREAMING_SNAKE_CASE ) for el in inputs['mask_labels']]
SCREAMING_SNAKE_CASE_ : Optional[int] = [el.to(_SCREAMING_SNAKE_CASE ) for el in inputs['class_labels']]
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
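# A minimal standalone inference sketch mirroring the integration tests above
# (same checkpoint and COCO fixture image; illustrative only, not part of the test suite):
#
#     model = MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' ).eval()
#     image_processor = MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
#     inputs = image_processor(prepare_img() , return_tensors='pt' )
#     with torch.no_grad():
#         outputs = model(**inputs )
#     # outputs.class_queries_logits: (1, num_queries, num_labels + 1)
#     # outputs.masks_queries_logits: (1, num_queries, height // 4, width // 4)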
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
"""simple docstring"""
    parser = ArgumentParser(
description=(
'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
) )
# Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
        'training_script' , type=str , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
return parser.parse_args()
def main():
"""simple docstring"""
    args = parse_args()
# Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod = importlib.import_module(script_fpath.stem )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
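# Example invocation (hypothetical script name and arguments, for illustration only):
#   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5
# The launcher imports my_training_script.py as a module and spawns its `_mp_fn`
# once per TPU core with the patched sys.argv.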
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        '''simple docstring'''
        self.block_size = 10
    def test_fit_to_block_sequence_too_small(self):
        '''simple docstring'''
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_fit_to_block_sequence_fit_exactly(self):
        '''simple docstring'''
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_fit_to_block_sequence_too_big(self):
        '''simple docstring'''
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_process_story_no_highlights(self):
        '''simple docstring'''
        raw_story = '''It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this.'''
        _, summary_lines = process_story(raw_story )
        self.assertEqual(summary_lines , [] )
    def test_process_empty_story(self):
        '''simple docstring'''
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story )
        self.assertEqual(story_lines , [] )
        self.assertEqual(summary_lines , [] )
    def test_process_story_with_missing_period(self):
        '''simple docstring'''
        raw_story = (
            '''It was the year of Our Lord one thousand seven hundred and '''
            '''seventy-five\n\nSpiritual revelations were conceded to England '''
            '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
        )
        story_lines, summary_lines = process_story(raw_story )
        expected_story_lines = [
            '''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
            '''Spiritual revelations were conceded to England at that favoured period, as at this.''',
        ]
        self.assertEqual(expected_story_lines , story_lines )
        expected_summary_lines = ['''It was the best of times.''']
        self.assertEqual(expected_summary_lines , summary_lines )
    def test_build_mask_no_padding(self):
        '''simple docstring'''
        sequence = torch.tensor([1, 2, 3, 4] )
        expected = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(sequence , 0 ).numpy() , expected.numpy() )
    def test_build_mask(self):
        '''simple docstring'''
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 23 ).numpy() , expected.numpy() )
    def test_build_mask_with_padding_equal_to_one(self):
        '''simple docstring'''
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 1 ).numpy() , expected.numpy() )
    def test_compute_token_type_ids(self):
        '''simple docstring'''
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        result = compute_token_type_ids(batch , separator )
        np.testing.assert_array_equal(result , expected )
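        # compute_token_type_ids flips the segment id at each separator token (101 here),
        # with the separator itself taking the new id, e.g. [1, 2, 3, 101, 5, 6] -> [1, 1, 1, 0, 0, 0].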
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """simple docstring"""
    sq = int(number**0.5 )
    return number == sq * sq
def add_three(
    x_num: int , x_den: int , y_num: int , y_den: int , z_num: int , z_den: int ) -> tuple[int, int]:
    """simple docstring"""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
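# Worked example: for 1/2 + 1/3 + 1/6, top = 18 + 12 + 6 = 36 and bottom = 36,
# which gcd-reduces to (1, 1), i.e. add_three(1, 2, 1, 3, 1, 6) == (1, 1).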
def solution(order: int = 35) -> int:
    """simple docstring"""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , "tf_padding" ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , "depth_multiplier" ) )
class lowercase__ :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str=13 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : Any=0.25 , _UpperCAmelCase : Optional[int]=8 , _UpperCAmelCase : Union[str, Any]=8 , _UpperCAmelCase : Tuple=6 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : int="relu6" , _UpperCAmelCase : Optional[int]=1280 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[int]=10 , _UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = depth_multiplier
UpperCAmelCase_ = depth_divisible_by
UpperCAmelCase_ = min_depth
UpperCAmelCase_ = expand_ratio
UpperCAmelCase_ = tf_padding
UpperCAmelCase_ = output_stride
UpperCAmelCase_ = first_layer_is_expansion
UpperCAmelCase_ = finegrained_output
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase_ = classifier_dropout_prob
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase__ ( self : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = MobileNetVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowercase__ ( self : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileNetVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MobileNetVaForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = MobileNetVaModelTester(self )
UpperCAmelCase_ = MobileNetVaConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not output attentions" )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ):
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 16
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MobileNetVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
)
@slow
def lowercase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(_UpperCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1001) )
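        # 1001 labels = the 1000 ImageNet classes plus the extra background class
        # kept from the original TF MobileNet checkpoints.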
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor([0.2445, -1.1993, 0.1905] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
UpperCAmelCase_ = model.to(_UpperCAmelCase )
UpperCAmelCase_ = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits
# verify the logits
UpperCAmelCase_ = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , _UpperCAmelCase )
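        # 21 = background + the 20 PASCAL VOC object classes; 65 = 512 // 8 + 1
        # for the 513px input at output stride 8.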
UpperCAmelCase_ = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=30 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : str=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : List[Any]=10 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Dict=None , ) -> str:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
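        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so seq_length = 226 including [CLS]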
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = TFViTModel(config=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ = self.image_size // 2
UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase )
UpperCAmelCase_ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowercase__ ( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = TFViTForImageClassification(_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ = self.image_size // 2
UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = TFViTForImageClassification(_UpperCAmelCase )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCamelCase = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = TFViTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
pass
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , tf.keras.layers.Layer ) )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowercase__ ( self : int ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="tf" )
# forward pass
UpperCAmelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 )
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( __snake_case , unittest.TestCase ):
lowercase_ : str =LEDTokenizer
lowercase_ : Union[str, Any] =LEDTokenizerFast
lowercase_ : Tuple =True
def A__ ( self):
super().setUp()
lowercase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase = dict(zip(A_ ,range(len(A_))))
lowercase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase = {"unk_token": "<unk>"}
lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''])
lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''') as fp:
fp.write(json.dumps(A_) + '''\n''')
with open(self.merges_file ,'''w''' ,encoding='''utf-8''') as fp:
fp.write('''\n'''.join(A_))
def A__ ( self ,**A__):
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**A_)
def A__ ( self ,**A__):
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**A_)
def A__ ( self ,A__):
return "lower newer", "lower newer"
@cached_property
def A__ ( self):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''')
@cached_property
def A__ ( self):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''')
@require_torch
def A__ ( self):
lowercase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase = tokenizer(A_ ,max_length=len(A_) ,padding=A_ ,return_tensors='''pt''')
self.assertIsInstance(A_ ,A_)
self.assertEqual((2, 9) ,batch.input_ids.shape)
self.assertEqual((2, 9) ,batch.attention_mask.shape)
lowercase = batch.input_ids.tolist()[0]
self.assertListEqual(A_ ,A_)
@require_torch
def A__ ( self):
lowercase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase = tokenizer(A_ ,padding=A_ ,return_tensors='''pt''')
self.assertIn('''input_ids''' ,A_)
self.assertIn('''attention_mask''' ,A_)
self.assertNotIn('''labels''' ,A_)
self.assertNotIn('''decoder_attention_mask''' ,A_)
@require_torch
def A__ ( self):
lowercase = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase = tokenizer(text_target=A_ ,max_length=3_2 ,padding='''max_length''' ,return_tensors='''pt''')
self.assertEqual(3_2 ,targets['''input_ids'''].shape[1])
@require_torch
def A__ ( self):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase = tokenizer(
['''I am a small frog''' * 1_0_2_4, '''I am a small frog'''] ,padding=A_ ,truncation=A_ ,return_tensors='''pt''')
self.assertIsInstance(A_ ,A_)
self.assertEqual(batch.input_ids.shape ,(2, 5_1_2_2))
@require_torch
def A__ ( self):
lowercase = ["A long paragraph for summarization."]
lowercase = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase = tokenizer(A_ ,return_tensors='''pt''')
lowercase = tokenizer(text_target=A_ ,return_tensors='''pt''')
lowercase = inputs["input_ids"]
lowercase = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
@require_torch
def A__ ( self):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase = ["Summary of the text.", "Another summary."]
lowercase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowercase = tokenizer(A_ ,padding=A_)
lowercase = [[0] * len(A_) for x in encoded_output["input_ids"]]
lowercase = tokenizer.pad(A_)
self.assertSequenceEqual(outputs['''global_attention_mask'''] ,A_)
def A__ ( self):
pass
def A__ ( self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase = self.rust_tokenizer_class.from_pretrained(A_ ,**A_)
lowercase = self.tokenizer_class.from_pretrained(A_ ,**A_)
lowercase = "A, <mask> AllenNLP sentence."
lowercase = tokenizer_r.encode_plus(A_ ,add_special_tokens=A_ ,return_token_type_ids=A_)
lowercase = tokenizer_p.encode_plus(A_ ,add_special_tokens=A_ ,return_token_type_ids=A_)
self.assertEqual(sum(tokens_r['''token_type_ids''']) ,sum(tokens_p['''token_type_ids''']))
self.assertEqual(
sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']) ,sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']) ,)
lowercase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
lowercase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
self.assertSequenceEqual(tokens_p['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(tokens_r['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(
A_ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
self.assertSequenceEqual(
A_ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase__ :Any = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config' , description=_description )
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
    config_args = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=lowerCAmelCase__ , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=lowerCAmelCase__ , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
lowercase = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=lowerCAmelCase__ , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher(args):
    '''simple docstring'''
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , 'r' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    args.command = '; '.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'Running {" ".join(lowerCAmelCase__ )}' )
return
    subprocess.run(cmd )
print('''Successfully setup pod.''' )
def main():
    '''simple docstring'''
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
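# Example invocation (hypothetical TPU name and zone, for illustration only):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a --command "echo hello" --debug
# With --debug set, the assembled `gcloud compute tpus tpu-vm ssh ...` command is
# printed instead of executed.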
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """simple docstring"""
    slow_tokenizer_class = CustomTokenizer
    pass
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__SCREAMING_SNAKE_CASE : Optional[int] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
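    # _LazyModule defers the heavy torch/vision imports declared in _import_structure
    # until an attribute (e.g. VivitModel) is first accessed, so `import transformers`
    # stays fast even when the optional backends are not installed.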
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
_lowercase : Dict = None
_lowercase : int = BloomTokenizerFast
_lowercase : Any = BloomTokenizerFast
_lowercase : Dict = True
_lowercase : Union[str, Any] = False
_lowercase : Optional[Any] = '''tokenizer_file'''
_lowercase : Any = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def __magic_name__ ( self : Tuple):
'''simple docstring'''
super().setUp()
snake_case__ = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""")
tokenizer.save_pretrained(self.tmpdirname)
def __magic_name__ ( self : str , **UpperCamelCase__ : Dict):
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__)
def __magic_name__ ( self : int):
'''simple docstring'''
snake_case__ = self.get_rust_tokenizer()
snake_case__ = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
snake_case__ = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]
snake_case__ = tokenizer.batch_encode_plus(UpperCamelCase__)["input_ids"]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
snake_case__ = tokenizer.batch_decode(UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
def __magic_name__ ( self : Dict , UpperCamelCase__ : List[str]=6):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
snake_case__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
snake_case__ = "This is a simple input"
snake_case__ = ["This is a simple input 1", "This is a simple input 2"]
snake_case__ = ("This is a simple input", "This is a pair")
snake_case__ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__)
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""")
                tokenizer_r.pad_token = None # Hotfixing padding = None
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""")
# Simple input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""")
# Simple input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" , )
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""")
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""")
# Pair input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" , )
def __magic_name__ ( self : Any):
'''simple docstring'''
snake_case__ = self.get_rust_tokenizer()
snake_case__ = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=UpperCamelCase__)
snake_case__ = next(iter(UpperCamelCase__))["premise"] # pick up one data
snake_case__ = list(sample_data.values())
snake_case__ = list(map(tokenizer.encode , UpperCamelCase__))
snake_case__ = [tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__) for x in output_tokens]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """simple docstring"""
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
'''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """simple docstring"""
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        '''simple docstring'''
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
        self.node_queue = [self.start]
        self.reached = False
    def search(self) -> Path | None:
        '''simple docstring'''
        while self.node_queue:
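            # list.pop(0) is O(n); collections.deque.popleft() would make this O(1)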
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ))
return successors
    def retrace_path(self, node: Node | None) -> Path:
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
path.reverse()
return path
class BidirectionalBreadthFirstSearch:
    """simple docstring"""
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        '''simple docstring'''
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False
def __magic_name__ ( self : Any):
'''simple docstring'''
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
snake_case__ = self.fwd_bfs.node_queue.pop(0)
snake_case__ = self.bwd_bfs.node_queue.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
snake_case__ = True
return self.retrace_bidirectional_path(
UpperCamelCase__ , UpperCamelCase__)
snake_case__ = current_bwd_node
snake_case__ = current_fwd_node
snake_case__ = {
self.fwd_bfs: self.fwd_bfs.get_successors(UpperCamelCase__),
self.bwd_bfs: self.bwd_bfs.get_successors(UpperCamelCase__),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(UpperCamelCase__)
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Node , UpperCamelCase__ : Node):
'''simple docstring'''
snake_case__ = self.fwd_bfs.retrace_path(UpperCamelCase__)
snake_case__ = self.bwd_bfs.retrace_path(UpperCamelCase__)
bwd_path.pop()
bwd_path.reverse()
snake_case__ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
a__ = (0, 0)
a__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a__ = time.time()
a__ = BreadthFirstSearch(init, goal)
a__ = bfs.search()
a__ = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
a__ = time.time()
a__ = BidirectionalBreadthFirstSearch(init, goal)
a__ = bd_bfs.search()
a__ = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 99
| 0
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Root -> left subtree -> right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Left subtree -> right subtree -> root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Left subtree -> root -> right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Any]:
    """Breadth-first traversal using a FIFO queue."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Any] | list[Any]:
    """Alternate between left-to-right and right-to-left per level."""
    if root is None:
        return []
    output: list[Sequence[Any]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
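# Worked example for the tree built by make_tree() (1 at the root, 2 and 3 as
# its children, 4 and 5 under 2):
#   preorder(make_tree())    -> [1, 2, 4, 5, 3]
#   inorder(make_tree())     -> [4, 2, 5, 1, 3]
#   postorder(make_tree())   -> [4, 5, 2, 3, 1]
#   level_order(make_tree()) -> [1, 2, 3, 4, 5]
#   zigzag(make_tree())      -> [[1], [3, 2], [4, 5]]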
| 43
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    # attribute names below are reconstructed to match transformers' DPTConfig;
    # the obfuscated dump had dropped the assignment targets
    config = DPTConfig(embedding_type="""hybrid""")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = """project"""

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""")) , """r"""))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("""pretrained.model""" , """dpt.encoder""")
    if "pretrained.model" in name:
        name = name.replace("""pretrained.model""" , """dpt.embeddings""")
    if "patch_embed" in name:
        name = name.replace("""patch_embed""" , """""")
    if "pos_embed" in name:
        name = name.replace("""pos_embed""" , """position_embeddings""")
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""")
    if "proj" in name and "project" not in name:
        name = name.replace("""proj""" , """projection""")
    if "blocks" in name:
        name = name.replace("""blocks""" , """layer""")
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""")
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("""norm1""" , """layernorm_before""")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("""norm2""" , """layernorm_after""")
    if "scratch.output_conv" in name:
        name = name.replace("""scratch.output_conv""" , """head""")
    if "scratch" in name:
        name = name.replace("""scratch""" , """neck""")
    if "layer1_rn" in name:
        name = name.replace("""layer1_rn""" , """convs.0""")
    if "layer2_rn" in name:
        name = name.replace("""layer2_rn""" , """convs.1""")
    if "layer3_rn" in name:
        name = name.replace("""layer3_rn""" , """convs.2""")
    if "layer4_rn" in name:
        name = name.replace("""layer4_rn""" , """convs.3""")
if "refinenet" in name:
lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
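        # Worked example of the index flip above (key name illustrative):
        #   "neck.refinenet1.out_conv" -> layer_idx = 1 -> abs(1 - 4) = 3
        #   so the key becomes "neck.fusion_stage.layers.3.out_conv",
        #   and the out_conv -> projection rule below finishes the rename.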
if "out_conv" in name:
lowerCamelCase__ = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowerCamelCase__ = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowerCamelCase__ = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location="""cpu""")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if """ade""" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if """ade""" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image , return_tensors="""pt""")
    # forward pass
    outputs = model(**encoding).logits if """ade""" in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F'''Saving model to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(F'''Saving image processor to {pytorch_dump_folder_path}''')
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("""ybelkada/dpt-hybrid-midas""")
        image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""")
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
UpperCamelCase : List[str] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
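# Example invocation, as a sketch (script filename and local checkpoint path
# are illustrative; torch.load above expects a local file, not a URL):
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url ./dpt_hybrid_checkpoint.pt \
#       --pytorch_dump_folder_path ./dpt-converted --show_prediction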
| 50
| 0
|
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self : str , **lowerCAmelCase__ : List[str] ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[Any] , **lowerCAmelCase__ : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = {}
__SCREAMING_SNAKE_CASE : str = {}
__SCREAMING_SNAKE_CASE : Tuple = {}
# preprocess args
if "points_per_batch" in kwargs:
__SCREAMING_SNAKE_CASE : Any = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
__SCREAMING_SNAKE_CASE : Dict = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
__SCREAMING_SNAKE_CASE : List[Any] = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
__SCREAMING_SNAKE_CASE : Dict = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
__SCREAMING_SNAKE_CASE : int = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
__SCREAMING_SNAKE_CASE : Optional[int] = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
__SCREAMING_SNAKE_CASE : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
__SCREAMING_SNAKE_CASE : Optional[int] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
__SCREAMING_SNAKE_CASE : List[Any] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
__SCREAMING_SNAKE_CASE : Optional[int] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
__SCREAMING_SNAKE_CASE : Optional[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
__SCREAMING_SNAKE_CASE : Any = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Tuple , lowerCAmelCase__ : int , *lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Optional[int]=None , **lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
return super().__call__(lowerCAmelCase__ , *lowerCAmelCase__ , num_workers=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any]=6_4 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : float = 5_1_2 / 1_5_0_0 , lowerCAmelCase__ : Optional[int] = 3_2 , lowerCAmelCase__ : Optional[int] = 1 , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = load_image(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = self.image_processor.size["""longest_edge"""]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor.generate_crop_boxes(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inference_context()
with inference_context():
__SCREAMING_SNAKE_CASE : Any = self._ensure_tensor_on_device(lowerCAmelCase__ , device=self.device )
__SCREAMING_SNAKE_CASE : int = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
__SCREAMING_SNAKE_CASE : List[Any] = image_embeddings
__SCREAMING_SNAKE_CASE : List[str] = grid_points.shape[1]
__SCREAMING_SNAKE_CASE : Union[str, Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = grid_points[:, i : i + points_per_batch, :, :]
__SCREAMING_SNAKE_CASE : Dict = input_labels[:, i : i + points_per_batch]
__SCREAMING_SNAKE_CASE : Any = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any]=0.88 , lowerCAmelCase__ : Dict=0.95 , lowerCAmelCase__ : List[str]=0 , lowerCAmelCase__ : Optional[int]=1 , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = model_inputs.pop("""input_boxes""" )
__SCREAMING_SNAKE_CASE : int = model_inputs.pop("""is_last""" )
__SCREAMING_SNAKE_CASE : Any = model_inputs.pop("""original_sizes""" ).tolist()
__SCREAMING_SNAKE_CASE : str = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.model(**lowerCAmelCase__ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
__SCREAMING_SNAKE_CASE : Any = model_outputs["""pred_masks"""]
__SCREAMING_SNAKE_CASE : Dict = self.image_processor.post_process_masks(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , binarize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = model_outputs["""iou_scores"""]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int=False , lowerCAmelCase__ : List[str]=False , lowerCAmelCase__ : Optional[Any]=0.7 , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : int = []
__SCREAMING_SNAKE_CASE : Any = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
__SCREAMING_SNAKE_CASE : List[Any] = torch.cat(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = torch.cat(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor.post_process_for_mask_generation(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = defaultdict(lowerCAmelCase__ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = {}
if output_rle_mask:
__SCREAMING_SNAKE_CASE : List[Any] = rle_mask
if output_bboxes_mask:
__SCREAMING_SNAKE_CASE : List[Any] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 178
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def lowerCAmelCase_ ( _lowerCamelCase: Tuple ):
__SCREAMING_SNAKE_CASE : List[Any] = SwinvaConfig()
__SCREAMING_SNAKE_CASE : List[Any] = swinva_name.split("""_""" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = name_split[1]
if "to" in name_split[3]:
__SCREAMING_SNAKE_CASE : Dict = int(name_split[3][-3:] )
else:
__SCREAMING_SNAKE_CASE : str = int(name_split[3] )
if "to" in name_split[2]:
__SCREAMING_SNAKE_CASE : Optional[Any] = int(name_split[2][-2:] )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = int(name_split[2][6:] )
if model_size == "tiny":
__SCREAMING_SNAKE_CASE : Dict = 96
__SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2)
__SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif model_size == "small":
__SCREAMING_SNAKE_CASE : List[str] = 96
__SCREAMING_SNAKE_CASE : int = (2, 2, 18, 2)
__SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif model_size == "base":
__SCREAMING_SNAKE_CASE : int = 1_28
__SCREAMING_SNAKE_CASE : str = (2, 2, 18, 2)
__SCREAMING_SNAKE_CASE : Optional[int] = (4, 8, 16, 32)
else:
__SCREAMING_SNAKE_CASE : List[str] = 1_92
__SCREAMING_SNAKE_CASE : Union[str, Any] = (2, 2, 18, 2)
__SCREAMING_SNAKE_CASE : Dict = (6, 12, 24, 48)
if "to" in swinva_name:
__SCREAMING_SNAKE_CASE : int = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__SCREAMING_SNAKE_CASE : int = 2_18_41
__SCREAMING_SNAKE_CASE : str = """huggingface/label-files"""
__SCREAMING_SNAKE_CASE : List[str] = """imagenet-22k-id2label.json"""
__SCREAMING_SNAKE_CASE : List[str] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__SCREAMING_SNAKE_CASE : List[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : Optional[int] = idalabel
__SCREAMING_SNAKE_CASE : str = {v: k for k, v in idalabel.items()}
else:
__SCREAMING_SNAKE_CASE : str = 10_00
__SCREAMING_SNAKE_CASE : Optional[int] = """huggingface/label-files"""
__SCREAMING_SNAKE_CASE : Any = """imagenet-1k-id2label.json"""
__SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__SCREAMING_SNAKE_CASE : int = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : Optional[int] = idalabel
__SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : Any = img_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_classes
__SCREAMING_SNAKE_CASE : int = embed_dim
__SCREAMING_SNAKE_CASE : Dict = depths
__SCREAMING_SNAKE_CASE : str = num_heads
__SCREAMING_SNAKE_CASE : int = window_size
return config
def lowerCAmelCase_ ( _lowerCamelCase: int ):
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = """encoder.""" + name
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
__SCREAMING_SNAKE_CASE : Tuple = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
__SCREAMING_SNAKE_CASE : str = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if name == "norm.weight":
__SCREAMING_SNAKE_CASE : Tuple = """layernorm.weight"""
if name == "norm.bias":
__SCREAMING_SNAKE_CASE : Optional[int] = """layernorm.bias"""
if "head" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""head""" , """classifier""" )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = """swinv2.""" + name
return name
def lowerCAmelCase_ ( _lowerCamelCase: int , _lowerCamelCase: Optional[Any] ):
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE : Optional[Any] = orig_state_dict.pop(_lowerCamelCase )
if "mask" in key:
continue
elif "qkv" in key:
__SCREAMING_SNAKE_CASE : Union[str, Any] = key.split(""".""" )
__SCREAMING_SNAKE_CASE : List[str] = int(key_split[1] )
__SCREAMING_SNAKE_CASE : Dict = int(key_split[3] )
__SCREAMING_SNAKE_CASE : str = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : Optional[int] = val[:dim, :]
__SCREAMING_SNAKE_CASE : str = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : Dict = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = val[:dim]
__SCREAMING_SNAKE_CASE : int = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : int = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = val
return orig_state_dict
def lowerCAmelCase_ ( _lowerCamelCase: Tuple , _lowerCamelCase: int ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
__SCREAMING_SNAKE_CASE : int = get_swinva_config(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = SwinvaForImageClassification(_lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[int] = convert_state_dict(timm_model.state_dict() , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" , """-""" ) ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=_lowerCamelCase , return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE : int = timm_model(inputs["""pixel_values"""] )
__SCREAMING_SNAKE_CASE : Dict = model(**_lowerCamelCase ).logits
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 )
print(F"Saving model {swinva_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCamelCase )
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase ) , organization="""nandwalritik""" , commit_message="""Add model""" , )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase__ : Optional[int] = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
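# Example invocation, as a sketch (script filename illustrative; timm downloads
# the named pretrained weights on first use):
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-converted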
| 178
| 1
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Combines the image and text embeddings
    into a format usable by the decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
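# Shape walkthrough for the defaults above (clip_embeddings_dim=768,
# clip_extra_context_tokens=4), with an illustrative cross_attention_dim=1280:
#   image_embeddings (B, 768) --clip_extra_context_tokens_proj--> (B, 4 * 1280)
#   .reshape(B, -1, 4) gives (B, 1280, 4); .permute(0, 2, 1) gives (B, 4, 1280),
#   i.e. four extra context tokens, each of width cross_attention_dim.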
| 220
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = 'Hello, World!'
__SCREAMING_SNAKE_CASE = 'en_XX'
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : str ,lowerCAmelCase_ : str ,lowerCAmelCase_ : bool ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] =Path('data_bin' )
SCREAMING_SNAKE_CASE_ : Any =FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(lowerCAmelCase_ ).parent ) ,checkpoint_file=Path(lowerCAmelCase_ ).name ,_name='xmod_base' ,arch='xmod_base' ,task='multilingual_masked_lm' ,data_name_or_path=str(lowerCAmelCase_ ) ,bpe='sentencepiece' ,sentencepiece_model=str(Path(lowerCAmelCase_ ).parent / 'sentencepiece.bpe.model' ) ,src_dict=str(data_dir / 'dict.txt' ) ,)
xmod.eval() # disable dropout
print(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] =xmod.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE_ : Union[str, Any] =XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings ,hidden_size=xmod.cfg.model.encoder_embed_dim ,num_hidden_layers=xmod.cfg.model.encoder_layers ,num_attention_heads=xmod.cfg.model.encoder_attention_heads ,intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1e-5 ,pre_norm=xmod.cfg.model.encoder_normalize_before ,adapter_reduction_factor=getattr(xmod.cfg.model ,'bottleneck' ,2 ) ,adapter_layer_norm=xmod.cfg.model.adapter_layer_norm ,adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm ,ln_before_adapter=xmod.cfg.model.ln_before_adapter ,languages=xmod.cfg.model.languages ,)
if classification_head:
SCREAMING_SNAKE_CASE_ : Any =xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' ,lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple =XmodForSequenceClassification(lowerCAmelCase_ ) if classification_head else XmodForMaskedLM(lowerCAmelCase_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE_ : Any =xmod_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE_ : Dict =xmod_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
SCREAMING_SNAKE_CASE_ : Dict =xmod_sent_encoder.layernorm_embedding.weight
SCREAMING_SNAKE_CASE_ : Tuple =xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE_ : Dict =model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE_ : List[Any] =xmod_sent_encoder.layers[i]
# self attention
SCREAMING_SNAKE_CASE_ : int =layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
SCREAMING_SNAKE_CASE_ : Tuple =xmod_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE_ : int =xmod_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE_ : Any =xmod_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE_ : Optional[int] =xmod_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE_ : Union[str, Any] =xmod_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE_ : int =xmod_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE_ : Dict =layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
SCREAMING_SNAKE_CASE_ : Optional[Any] =xmod_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE_ : Tuple =xmod_layer.self_attn.out_proj.bias
SCREAMING_SNAKE_CASE_ : Any =xmod_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE_ : Optional[Any] =xmod_layer.self_attn_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE_ : Optional[int] =layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
SCREAMING_SNAKE_CASE_ : int =xmod_layer.fca.weight
SCREAMING_SNAKE_CASE_ : List[Any] =xmod_layer.fca.bias
# output
SCREAMING_SNAKE_CASE_ : Union[str, Any] =layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
SCREAMING_SNAKE_CASE_ : Any =xmod_layer.fca.weight
SCREAMING_SNAKE_CASE_ : Dict =xmod_layer.fca.bias
SCREAMING_SNAKE_CASE_ : str =xmod_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE_ : List[str] =xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
SCREAMING_SNAKE_CASE_ : Dict =xmod_layer.adapter_layer_norm.weight
SCREAMING_SNAKE_CASE_ : Union[str, Any] =xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
SCREAMING_SNAKE_CASE_ : Union[str, Any] =bert_output.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE_ : Tuple =xmod_layer.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE_ : int =from_adapter.fca.weight
SCREAMING_SNAKE_CASE_ : Union[str, Any] =from_adapter.fca.bias
SCREAMING_SNAKE_CASE_ : Optional[Any] =from_adapter.fca.weight
SCREAMING_SNAKE_CASE_ : int =from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
SCREAMING_SNAKE_CASE_ : str =xmod_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE_ : Optional[int] =xmod_sent_encoder.layer_norm.bias
if classification_head:
SCREAMING_SNAKE_CASE_ : List[Any] =xmod.model.classification_heads['mnli'].dense.weight
SCREAMING_SNAKE_CASE_ : Optional[Any] =xmod.model.classification_heads['mnli'].dense.bias
SCREAMING_SNAKE_CASE_ : Dict =xmod.model.classification_heads['mnli'].out_proj.weight
SCREAMING_SNAKE_CASE_ : Optional[int] =xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE_ : Optional[int] =xmod.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE_ : Tuple =xmod.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE_ : Dict =xmod.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE_ : Tuple =xmod.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE_ : List[str] =xmod.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE_ : Tuple =xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE_ : Optional[Any] =xmod.encode(lowerCAmelCase_ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] =model(lowerCAmelCase_ )[0]
if classification_head:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =xmod.model.classification_heads['mnli'](xmod.extract_features(lowerCAmelCase_ ) )
else:
SCREAMING_SNAKE_CASE_ : List[str] =xmod.model(lowerCAmelCase_ ,lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape ,their_output.shape )
SCREAMING_SNAKE_CASE_ : str =torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
SCREAMING_SNAKE_CASE_ : str =torch.allclose(lowerCAmelCase_ ,lowerCAmelCase_ ,atol=1e-3 )
print('Do both models output the same tensors?' ,'🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(lowerCAmelCase_ ).mkdir(parents=lowerCAmelCase_ ,exist_ok=lowerCAmelCase_ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
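# Example invocation, as a sketch (script filename and checkpoint path are
# illustrative; the checkpoint directory must also hold sentencepiece.bpe.model
# and dict.txt, as loaded above):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path ./xmod.base/model.pt \
#       --pytorch_dump_folder_path ./xmod-converted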
| 220
| 1
|
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens) -> Optional[MinHash]:
    """Compute the MinHash of a token set, or None if the snippet is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code) -> Set[str]:
    """Split a code string on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash):
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element) -> Any:
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10_000), chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1, code2) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list), total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset, jaccard_threshold=0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
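# Minimal usage sketch (dataset name illustrative; rows are assumed to carry
# "content", "repo_name" and "path" fields, as the code above expects):
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)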
| 357
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bartpho'''] = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 357
| 1
|
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
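# Example: entering "4 2 7 1" sorts number_list in place to [1, 2, 4, 7].
# Note that recursion depth grows linearly with the input size, so Python's
# default recursion limit (~1000 frames) caps the list length this handles.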
| 504
|
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
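# Small sanity checks (n=1 is trivially solved; a 2x2 board admits no knight
# move, so no tour exists):
#   open_knight_tour(1) -> [[1]]
#   open_knight_tour(2) -> raises ValueError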
| 375
| 0
|
"""simple docstring"""
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
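# Minimal usage sketch:
#   config = NezhaConfig()        # BERT-base-like dimensions, vocab_size=21128
#   config.max_relative_position  # -> 64, the span of NEZHA's relative positions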
| 497
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase_ : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCamelCase_ : Optional[Any] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
UpperCamelCase_ : int = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
UpperCamelCase_ : Optional[int] = """▁"""
class _lowercase ( lowerCAmelCase ):
_a : int = VOCAB_FILES_NAMES
_a : int = PRETRAINED_VOCAB_FILES_MAP
_a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self : str , a : Optional[Any] , a : Optional[Any]="<s>" , a : Dict="</s>" , a : Any="</s>" , a : Optional[int]="<s>" , a : Optional[Any]="<unk>" , a : int="<pad>" , a : Tuple="<mask>" , a : Optional[Dict[str, Any]] = None , **a : List[Any] , ):
"""simple docstring"""
__snake_case : Dict =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
__snake_case : List[str] ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
__snake_case : Optional[int] =vocab_file
__snake_case : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a ) )
__snake_case : Optional[Any] ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
__snake_case : Optional[int] =len(self.sp_model ) - 1
__snake_case : Union[str, Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
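# Usage sketch (a minimal example; needs the `sentencepiece` package and network
# access to fetch the vocab file named in PRETRAINED_VOCAB_FILES_MAP above):
#
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   ids = tokenizer("Le camembert est délicieux.")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))  # begins with "<s>", ends with "</s>"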
| 497
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
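# Usage note (a sketch of the lazy-import behaviour, assuming this file is the
# package's models/xlm/__init__.py): importing the subpackage stays cheap, and the
# heavy torch/tf modeling modules are loaded only on first attribute access:
#
#   from transformers.models import xlm   # fast: no torch import yet
#   model_cls = xlm.XLMModel               # _LazyModule imports modeling_xlm here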
| 475
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
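# Running these tests (illustrative command; the path is the usual location of this
# file in a transformers checkout, adjust to yours):
#
#   python -m pytest tests/models/openai/test_modeling_openai.py -q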
| 657
| 0
|
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
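# Worked example (checked by hand): with target "purple" the table fills as
# table[1]=[["p"]], table[3]=[["ur","p"]], table[4]=[["purp"],["p","ur","p"]], so the
# call returns both decompositions, each reversed back into reading order:
#
#   print(all_construct("purple", ["purp", "p", "ur", "le", "purpl"]))
#   # [['purp', 'le'], ['p', 'ur', 'p', 'le']]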
| 712
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
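# Usage sketch (a minimal example of driving the ONNX config above; network access is
# needed for the tokenizer, and the checkpoint name is the one referenced in this file):
#
#   from transformers import BartConfig, BartTokenizer
#   config = BartConfig()
#   onnx_config = BartOnnxConfig(config, task="default")
#   tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   print(sorted(dummy))  # ['attention_mask', 'decoder_attention_mask', 'decoder_input_ids', 'input_ids']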
| 251
| 0
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 489
|
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
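# Usage sketch (a minimal stand-alone example of the scheduler under test; the config
# keys mirror get_scheduler_config above):
#
#   from diffusers import UnCLIPScheduler
#   scheduler = UnCLIPScheduler(
#       num_train_timesteps=1000, variance_type="fixed_small_log",
#       clip_sample=True, clip_sample_range=1.0, prediction_type="epsilon",
#   )
#   scheduler.set_timesteps(25)
#   print(scheduler.timesteps[:3])  # the first few (descending) inference timesteps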
| 489
| 1
|
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name
    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")
    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")
    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True
        )
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
lowerCAmelCase__ : Tuple = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
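# Usage sketch (file names are illustrative; run against locally downloaded original
# EfficientFormer weights and the matching config json):
#
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub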
| 716
|
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowerCAmelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
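# Usage sketch (script name illustrative; the flags come from the argparse block above,
# and kakaobrain/karlo-v1-alpha is the default txt2img checkpoint):
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --dump_path ./karlo-image-variations \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha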
| 502
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 167
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
a__ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a__ : Optional[str] = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
a__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a__ : bool = field(default=UpperCAmelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
a__ : str = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
a__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
a__ : int = field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a__ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
__UpperCamelCase :Union[str, Any] = import_module('''tasks''' )
try:
__UpperCamelCase :int = getattr(SCREAMING_SNAKE_CASE , model_args.task_type )
__UpperCamelCase :TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__UpperCamelCase :Optional[Any] = token_classification_task.get_labels(data_args.labels )
__UpperCamelCase :Dict[int, str] = dict(enumerate(SCREAMING_SNAKE_CASE ) )
__UpperCamelCase :Tuple = len(SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase :Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )} , cache_dir=model_args.cache_dir , )
__UpperCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__UpperCamelCase :str = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase :Dict = (
TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase :Optional[int] = (
TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple[List[int], List[int]]:
__UpperCamelCase :Optional[int] = np.argmax(SCREAMING_SNAKE_CASE , axis=2 )
__UpperCamelCase , __UpperCamelCase :int = preds.shape
__UpperCamelCase :str = [[] for _ in range(SCREAMING_SNAKE_CASE )]
__UpperCamelCase :int = [[] for _ in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(SCREAMING_SNAKE_CASE ) -> Dict:
__UpperCamelCase , __UpperCamelCase :Optional[Any] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
"precision": precision_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
"recall": recall_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
"f1": fa_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
}
# Data collator
__UpperCamelCase :Tuple = DataCollatorWithPadding(SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCamelCase :Union[str, Any] = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , compute_metrics=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase :int = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCamelCase :Union[str, Any] = trainer.evaluate()
__UpperCamelCase :List[str] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
writer.write('''%s = %s\n''' % (key, value) )
results.update(SCREAMING_SNAKE_CASE )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 167
| 1
|
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
# There are only 2 chains:
# one ends with 89, and seeding its member 58 first gives the least number of
# iterations when all the members are checked;
# the other ends with 1 and has only the single element 1.
# So 58 and 1 are declared at the start.
# Changed a dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10_000_000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
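# Quick sanity check against the examples in the Project Euler 92 statement
# (illustrative, not part of the original solution): next_number(44) == 32 since
# 4^2 + 4^2 = 32, and next_number(85) == 89 since 8^2 + 5^2 = 89.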
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 701
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
"image_embeds",
"negative_image_embeds",
]
UpperCAmelCase = ["image_embeds", "negative_image_embeds"]
UpperCAmelCase = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds, negative_image_embeds=zero_image_embeds, generator=generator, num_inference_steps=100, output_type="np", )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 191
| 0
|
"""simple docstring"""
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h_n = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
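# Worked example (matches the prints below): hexagonal_numbers(5) returns
# [0, 1, 6, 15, 28], i.e. n * (2n - 1) for n = 0..4.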
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 657
| 1
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
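# Reading the parametrized cases above: with parallel_min_length=16, map_nested
# only spins up a multiprocessing pool once both the iterable length and
# num_proc reach 16; every smaller combination takes the sequential
# _single_map_nested path instead.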
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@require_torch
    def test_temp_seed_pytorch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize('input_data' , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 708
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
    def test_pickle_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
'This is a very simple sentence.',
'The quick brown fox jumps over the lazy dog.',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="google/reformer-crime-and-punishment", revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a", padding=False, sequences=sequences, )
| 308
| 0
|
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=sequences, )
| 50
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1
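# Illustration (not part of the original script): _is_chinese_char(ord("中")) is
# True because U+4E2D sits in the main CJK block, so is_chinese("身高") returns 1,
# while is_chinese("180") returns 0 since "1" falls outside every CJK range.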
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
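# Hypothetical example (not in the original file): with bert_tokens
# ["身", "高", "180"] and chinese_word_set = {"身高"}, the loop matches the
# two-character word starting at index 0 and returns ["身", "##高", "180"]:
# continuation pieces of a whole Chinese word get the "##" prefix.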
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
| 50
| 1
|
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None
    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception('You need to set maximum flow algorithm before.')
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it in the concrete algorithm
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception('You should execute algorithm before using its result!')
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)
    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 707
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial():
    plt.scatter(X, y, color='red')
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color='blue')
    plt.title('Truth or Bluff (Linear Regression)')
    plt.xlabel('Position level')
    plt.ylabel('Salary')
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 260
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 104
|
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
    import jax
    import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 286
| 0
|
import re
def snake_case_ ( snake_case ) -> str:
if len(re.findall('[ATCG]' , __UpperCamelCase ) ) != len(__UpperCamelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
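    # Property check (added sketch, beyond the doctests above): complementing
    # twice must return the original strand.
    for strand in ("A", "ATCG", "GGGCCCAATT"):
        assert dna_complement(dna_complement(strand)) == strand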
| 708
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
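# Usage sketch (added; assumes the surrounding transformers package, since the
# relative imports above do not run standalone):
#   config = CanineConfig()
#   config.downsampling_rate   # 4 characters per downsampled position
#   config.num_hash_buckets    # 16384 buckets for the hash embeddings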
| 335
| 0
|
def longest_common_subsequence(x: str, y: str):
    """Return the length of the longest common subsequence of x and y,
    together with one such subsequence."""
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)

    import doctest

    doctest.testmod()
| 455
|
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
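# Usage sketch (added; not in the original file): the helpers above accept any
# object with a `process(sample) -> float` method. A pass-through "filter" has
# a flat frequency response and zero phase shift, which makes it a handy smoke
# test for both plots.
class AllPassthroughFilter:
    def process(self, sample: float) -> float:
        return sample


if __name__ == "__main__":
    show_frequency_response(AllPassthroughFilter(), 48_000)
    show_phase_response(AllPassthroughFilter(), 48_000)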
| 150
| 0
|
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number n with the
    greatest product and return that product."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
| 70
|
def find_min(arr: list) -> int:
    """Partition arr into two subsets and return the minimum possible
    difference between their sums."""
    n = len(arr)
    s = sum(arr)

    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # a subset of the first i items sums to j if one already did
            # without item i, or if adding item i reaches j
            dp[i][j] = dp[i - 1][j]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
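# Quick demo (added; not in the original snippet): [1, 6, 11, 5] splits into
# {1, 5, 6} and {11}, with sums 12 and 11, so the minimum difference is 1.
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # 1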
| 70
| 1
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed (m/s) of a gas molecule."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 636
|
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    """Sort the list in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True

        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
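# Self-check (added sketch): odd-even transposition sort must agree with the
# built-in sorted() on a fixed example; runs cheaply at import time.
assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]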
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 636
| 1
|
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
A__ = logging.getLogger(__name__)
class __UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self: int , __UpperCamelCase: str=-1 ):
'''simple docstring'''
__magic_name__ = label_idx
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , __UpperCamelCase: List[str] , __UpperCamelCase: Union[Split, str] ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ = mode.value
__magic_name__ = os.path.join(__UpperCamelCase , F'{mode}.txt' )
__magic_name__ = 1
__magic_name__ = []
with open(__UpperCamelCase , encoding='utf-8' ) as f:
__magic_name__ = []
__magic_name__ = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=__UpperCamelCase , labels=__UpperCamelCase ) )
guid_index += 1
__magic_name__ = []
__magic_name__ = []
else:
__magic_name__ = line.split(' ' )
words.append(splits[0] )
if len(__UpperCamelCase ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=__UpperCamelCase , labels=__UpperCamelCase ) )
return examples
def _SCREAMING_SNAKE_CASE ( self: List[Any] , __UpperCamelCase: TextIO , __UpperCamelCase: TextIO , __UpperCamelCase: List ):
'''simple docstring'''
__magic_name__ = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(__UpperCamelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__magic_name__ = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(__UpperCamelCase )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def _SCREAMING_SNAKE_CASE ( self: int , __UpperCamelCase: str ):
'''simple docstring'''
if path:
with open(__UpperCamelCase , 'r' ) as f:
__magic_name__ = f.read().splitlines()
if "O" not in labels:
__magic_name__ = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self: Tuple ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , __UpperCamelCase: str ):
'''simple docstring'''
if path:
with open(__UpperCamelCase , 'r' ) as f:
__magic_name__ = f.read().splitlines()
if "O" not in labels:
__magic_name__ = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def _SCREAMING_SNAKE_CASE ( self: Any , __UpperCamelCase: Optional[int] , __UpperCamelCase: Union[Split, str] ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ = mode.value
__magic_name__ = os.path.join(__UpperCamelCase , F'{mode}.txt' )
__magic_name__ = 1
__magic_name__ = []
with open(__UpperCamelCase , encoding='utf-8' ) as f:
for sentence in parse_incr(__UpperCamelCase ):
__magic_name__ = []
__magic_name__ = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=__UpperCamelCase , labels=__UpperCamelCase ) )
guid_index += 1
return examples
def _SCREAMING_SNAKE_CASE ( self: List[Any] , __UpperCamelCase: TextIO , __UpperCamelCase: TextIO , __UpperCamelCase: List ):
'''simple docstring'''
__magic_name__ = 0
for sentence in parse_incr(__UpperCamelCase ):
__magic_name__ = preds_list[example_id]
__magic_name__ = ''
for token in sentence:
out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(__UpperCamelCase )
example_id += 1
def _SCREAMING_SNAKE_CASE ( self: int , __UpperCamelCase: str ):
'''simple docstring'''
if path:
with open(__UpperCamelCase , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 184
|
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 184
| 1
|
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the simultaneous equations matrix * solution = vector via Gaussian
    elimination with partial pivoting, returning the solution column vector."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the matrix and the vector into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
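# Quick check (added; not part of the original solution): on the 2x2 system
# x + y = 3, 2x - y = 0, whose solution is x = 1, y = 2:
# >>> solve([[1, 1], [2, -1]], [[3], [0]])
# [[1.0], [2.0]]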
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Given data points (1, y_points[0]), (2, y_points[1]), ..., return a
    function that evaluates the interpolating polynomial at any integer."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials fitted
    to progressively longer prefixes of the sequence."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
| 60
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowerCAmelCase_ ( lowercase: Dict ) -> Any:
'''simple docstring'''
_UpperCamelCase: Optional[Any] = fname.split(os.path.sep )[-1]
return re.search(R'''^(.*)_\d+\.jpg$''' , lowercase ).groups()[0]
class __magic_name__ ( __a ):
"""simple docstring"""
def __init__( self : Dict , _lowercase : Any , _lowercase : Any=None , _lowercase : List[str]=None ):
"""simple docstring"""
_UpperCamelCase: str = file_names
_UpperCamelCase: List[Any] = image_transform
_UpperCamelCase: Tuple = label_to_id
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.file_names )
def __getitem__( self : List[str] , _lowercase : Optional[Any] ):
"""simple docstring"""
_UpperCamelCase: Optional[int] = self.file_names[idx]
_UpperCamelCase: Optional[int] = PIL.Image.open(_lowercase )
_UpperCamelCase: List[str] = raw_image.convert('''RGB''' )
if self.image_transform is not None:
_UpperCamelCase: Optional[Any] = self.image_transform(_lowercase )
_UpperCamelCase: Tuple = extract_label(_lowercase )
if self.label_to_id is not None:
_UpperCamelCase: Any = self.label_to_id[label]
return {"image": image, "label": label}
def lowerCAmelCase_ ( lowercase: Optional[Any] , lowercase: Any ) -> str:
'''simple docstring'''
# Initialize accelerator
if args.with_tracking:
_UpperCamelCase: Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
_UpperCamelCase: Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCamelCase: List[str] = config['''lr''']
_UpperCamelCase: int = int(config['''num_epochs'''] )
_UpperCamelCase: Optional[Any] = int(config['''seed'''] )
_UpperCamelCase: Optional[Any] = int(config['''batch_size'''] )
_UpperCamelCase: List[str] = config['''image_size''']
if not isinstance(lowercase , (list, tuple) ):
_UpperCamelCase: Optional[int] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , '''isdigit''' ):
if args.checkpointing_steps == "epoch":
_UpperCamelCase: Any = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_UpperCamelCase: List[str] = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
_UpperCamelCase: Any = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_UpperCamelCase: Union[str, Any] = os.path.split(lowercase )[-1].split('''.''' )[0]
accelerator.init_trackers(lowercase , lowercase )
# Grab all the image filenames
_UpperCamelCase: List[str] = [os.path.join(args.data_dir , lowercase ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
_UpperCamelCase: Optional[Any] = [extract_label(lowercase ) for fname in file_names]
_UpperCamelCase: int = list(set(lowercase ) )
id_to_label.sort()
_UpperCamelCase: Tuple = {lbl: i for i, lbl in enumerate(lowercase )}
# Set the seed before splitting the data.
np.random.seed(lowercase )
torch.manual_seed(lowercase )
torch.cuda.manual_seed_all(lowercase )
# Split our filenames between train and validation
_UpperCamelCase: List[str] = np.random.permutation(len(lowercase ) )
_UpperCamelCase: Dict = int(0.8 * len(lowercase ) )
_UpperCamelCase: Optional[int] = random_perm[:cut]
_UpperCamelCase: Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_UpperCamelCase: List[Any] = Compose([RandomResizedCrop(lowercase , scale=(0.5, 1.0) ), ToTensor()] )
_UpperCamelCase: int = PetsDataset(
[file_names[i] for i in train_split] , image_transform=lowercase , label_to_id=lowercase )
# For evaluation, we use a deterministic Resize
_UpperCamelCase: Dict = Compose([Resize(lowercase ), ToTensor()] )
_UpperCamelCase: str = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase , label_to_id=lowercase )
# Instantiate dataloaders.
_UpperCamelCase: str = DataLoader(lowercase , shuffle=lowercase , batch_size=lowercase , num_workers=4 )
_UpperCamelCase: str = DataLoader(lowercase , shuffle=lowercase , batch_size=lowercase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCamelCase: str = create_model('''resnet50d''' , pretrained=lowercase , num_classes=len(lowercase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCamelCase: Optional[int] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_UpperCamelCase: Optional[Any] = False
for param in model.get_classifier().parameters():
_UpperCamelCase: Optional[int] = True
# We normalize the batches of images to be a bit faster.
_UpperCamelCase: Union[str, Any] = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
_UpperCamelCase: List[Any] = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_UpperCamelCase: List[str] = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_UpperCamelCase: Any = OneCycleLR(optimizer=lowercase , max_lr=lowercase , epochs=lowercase , steps_per_epoch=len(lowercase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase: List[str] = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# We need to keep track of how many total steps we have iterated over
_UpperCamelCase: Tuple = 0
# We also need to keep track of the starting epoch so files are named properly
_UpperCamelCase: List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
_UpperCamelCase: Union[str, Any] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_UpperCamelCase: Union[str, Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_UpperCamelCase: Union[str, Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_UpperCamelCase: List[Any] = os.path.splitext(lowercase )[0]
if "epoch" in training_difference:
_UpperCamelCase: Any = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1
_UpperCamelCase: List[str] = None
else:
_UpperCamelCase: int = int(training_difference.replace('''step_''' , '''''' ) )
_UpperCamelCase: List[Any] = resume_step // len(lowercase )
resume_step -= starting_epoch * len(lowercase )
# Now we train the model
for epoch in range(lowercase , lowercase ):
model.train()
if args.with_tracking:
_UpperCamelCase: Optional[int] = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_UpperCamelCase: Union[str, Any] = accelerator.skip_first_batches(lowercase , lowercase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_UpperCamelCase: Optional[int] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_UpperCamelCase: Optional[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
_UpperCamelCase: int = (batch['''image'''] - mean) / std
_UpperCamelCase: List[str] = model(lowercase )
_UpperCamelCase: Dict = torch.nn.functional.cross_entropy(lowercase , batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(lowercase , lowercase ):
_UpperCamelCase: Any = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_UpperCamelCase: List[Any] = os.path.join(args.output_dir , lowercase )
accelerator.save_state(lowercase )
model.eval()
_UpperCamelCase: List[str] = 0
_UpperCamelCase: Union[str, Any] = 0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_UpperCamelCase: Optional[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
_UpperCamelCase: Union[str, Any] = (batch['''image'''] - mean) / std
with torch.no_grad():
_UpperCamelCase: List[Any] = model(lowercase )
_UpperCamelCase: Optional[Any] = outputs.argmax(dim=-1 )
_UpperCamelCase , _UpperCamelCase: Optional[int] = accelerator.gather_for_metrics((predictions, batch['''label''']) )
_UpperCamelCase: List[str] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_UpperCamelCase: Optional[int] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 100 * eval_metric,
'''train_loss''': total_loss.item() / len(lowercase ),
'''epoch''': epoch,
} , step=lowercase , )
if checkpointing_steps == "epoch":
_UpperCamelCase: Tuple = F"""epoch_{epoch}"""
if args.output_dir is not None:
_UpperCamelCase: Any = os.path.join(args.output_dir , lowercase )
accelerator.save_state(lowercase )
if args.with_tracking:
accelerator.end_training()
def lowerCAmelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase: str = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' , required=lowercase , help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' )
parser.add_argument(
'''--mixed_precision''' , type=lowercase , default=lowercase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' , type=lowercase , default=lowercase , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , )
parser.add_argument(
'''--output_dir''' , type=lowercase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=lowercase , default=lowercase , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=lowercase , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
_UpperCamelCase: Any = parser.parse_args()
_UpperCamelCase: Optional[int] = {'''lr''': 3E-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 224}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
| 271
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
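# Demo sketch (added; assumes this module is imported as part of transformers,
# since the relative imports above do not run standalone). The attribute_map
# and properties let generic code read the usual names even though the
# underlying fields are d_model and encoder_attention_heads:
#   cfg = PegasusConfig()
#   cfg.hidden_size            # 1024, resolved via the d_model field
#   cfg.num_attention_heads    # 16, the encoder attention heads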
| 143
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
UpperCamelCase__ = logging.getLogger(__name__)
def UpperCAmelCase__ ( _A , _A ):
"""simple docstring"""
a_ = np.argmax(_A , axis=1 )
return np.sum(outputs == labels )
def UpperCAmelCase__ ( _A ):
"""simple docstring"""
with open(_A , encoding='''utf_8''' ) as f:
a_ = csv.reader(_A )
a_ = []
next(_A ) # skip the first line
for line in tqdm(_A ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def UpperCAmelCase__ ( _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
a_ = []
for dataset in encoded_datasets:
a_ = len(_A )
a_ = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
a_ = np.zeros((n_batch, 2) , dtype=np.intaa )
a_ = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
a_ = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(_A ):
a_ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
a_ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
a_ = with_conta
a_ = with_conta
a_ = len(_A ) - 1
a_ = len(_A ) - 1
a_ = with_conta
a_ = with_conta
a_ = mc_label
a_ = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_A ) for t in all_inputs ) )
return tensor_datasets
def UpperCAmelCase__ ( ):
"""simple docstring"""
a_ = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=_A , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=_A , type=_A , required=_A , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=_A , default='''''' )
parser.add_argument('''--eval_dataset''' , type=_A , default='''''' )
parser.add_argument('''--seed''' , type=_A , default=42 )
parser.add_argument('''--num_train_epochs''' , type=_A , default=3 )
parser.add_argument('''--train_batch_size''' , type=_A , default=8 )
parser.add_argument('''--eval_batch_size''' , type=_A , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=_A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=_A , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=_A , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_A , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=_A , default=6.2_5e-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=_A , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=_A , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=_A , default=0.01 )
parser.add_argument('''--lm_coef''' , type=_A , default=0.9 )
parser.add_argument('''--n_valid''' , type=_A , default=374 )
parser.add_argument('''--server_ip''' , type=_A , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_A , default='''''' , help='''Can be used for distant debugging.''' )
a_ = parser.parse_args()
print(_A )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_A )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
a_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
a_ = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(_A , _A ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
a_ = ['''_start_''', '''_delimiter_''', '''_classify_''']
a_ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_A )
a_ = tokenizer.convert_tokens_to_ids(_A )
a_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_A ) )
model.to(_A )
# Load and encode the datasets
def tokenize_and_encode(_A ):
if isinstance(_A , _A ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_A ) )
elif isinstance(_A , _A ):
return obj
return [tokenize_and_encode(_A ) for o in obj]
logger.info('''Encoding dataset...''' )
a_ = load_rocstories_dataset(args.train_dataset )
a_ = load_rocstories_dataset(args.eval_dataset )
a_ = (train_dataset, eval_dataset)
a_ = tokenize_and_encode(_A )
# Compute the max input length for the Transformer
a_ = model.config.n_positions // 2 - 2
a_ = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
a_ = min(_A , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
a_ = pre_process_datasets(_A , _A , _A , *_A )
a_ , a_ = tensor_datasets[0], tensor_datasets[1]
a_ = TensorDataset(*_A )
a_ = RandomSampler(_A )
a_ = DataLoader(_A , sampler=_A , batch_size=args.train_batch_size )
a_ = TensorDataset(*_A )
a_ = SequentialSampler(_A )
a_ = DataLoader(_A , sampler=_A , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
a_ = args.max_steps
a_ = args.max_steps // (len(_A ) // args.gradient_accumulation_steps) + 1
else:
a_ = len(_A ) // args.gradient_accumulation_steps * args.num_train_epochs
a_ = list(model.named_parameters() )
a_ = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
a_ = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
a_ = AdamW(_A , lr=args.learning_rate , eps=args.adam_epsilon )
a_ = get_linear_schedule_with_warmup(
_A , num_warmup_steps=args.warmup_steps , num_training_steps=_A )
if args.do_train:
a_ , a_ , a_ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
a_ = 0
a_ = 0
a_ = tqdm(_A , desc='''Training''' )
for step, batch in enumerate(_A ):
a_ = tuple(t.to(_A ) for t in batch )
a_ , a_ , a_ , a_ = batch
a_ = model(_A , mc_token_ids=_A , lm_labels=_A , mc_labels=_A )
a_ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
a_ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
a_ = '''Training loss: {:.2e} lr: {:.2e}'''.format(_A , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
a_ = model.module if hasattr(_A , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
a_ = os.path.join(args.output_dir , _A )
a_ = os.path.join(args.output_dir , _A )
torch.save(model_to_save.state_dict() , _A )
model_to_save.config.to_json_file(_A )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
a_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
a_ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_A )
if args.do_eval:
model.eval()
a_ , a_ = 0, 0
a_ , a_ = 0, 0
for batch in tqdm(_A , desc='''Evaluating''' ):
a_ = tuple(t.to(_A ) for t in batch )
a_ , a_ , a_ , a_ = batch
with torch.no_grad():
a_ , a_ , a_ , a_ = model(
_A , mc_token_ids=_A , lm_labels=_A , mc_labels=_A )
a_ = mc_logits.detach().cpu().numpy()
a_ = mc_labels.to('''cpu''' ).numpy()
a_ = accuracy(_A , _A )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
a_ = eval_loss / nb_eval_steps
a_ = eval_accuracy / nb_eval_examples
a_ = tr_loss / nb_tr_steps if args.do_train else None
a_ = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
a_ = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(_A , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _A , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 143
| 1
|
'''simple docstring'''
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string to camelCase (or PascalCase).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 508
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 536
| 0
|
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__magic_name__ : List[Any] = False
__magic_name__ : int = True
__magic_name__ : List[str] = False
if __name__ == "__main__":
__magic_name__ : str = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__magic_name__ : Tuple = parser.parse_args()
__magic_name__ : Tuple = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
__magic_name__ : str = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
__magic_name__ : Optional[Any] = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
__magic_name__ : Dict = reader.read()
__magic_name__ : List[str] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
__magic_name__ : Optional[int] = UNetaDModel(**config)
else:
__magic_name__ : Optional[int] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
__magic_name__ : List[Any] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__magic_name__ : int = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__magic_name__ : Tuple = config[key]
del config[key]
__magic_name__ : Dict = [k.replace('UNetRes', '') for k in config['down_block_types']]
__magic_name__ : Tuple = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
__magic_name__ : int = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
__magic_name__ : List[Any] = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
__magic_name__ : str = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
__magic_name__ : Tuple = param_value
__magic_name__ : Union[str, Any] = True
if not has_changed:
__magic_name__ : Any = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 608
|
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape, params: FrozenDict = None):
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 608
| 1
|
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """
    Utility class for storing learned text embeddings for classifier free sampling.
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using VQ Diffusion.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    scheduler: VQDiffusionScheduler
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
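# --- Usage sketch (added for illustration) ---
# "microsoft/vq-diffusion-ithq" is the commonly published VQ-Diffusion checkpoint;
# its name is an assumption here, not something this file guarantees:
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", truncation_rate=1.0).images[0]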
| 602
|
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__A : List[str] = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__A : List[Any] = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__A : int = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3

    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
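# --- Usage sketch (added for illustration; mirrors the docstring example above) ---
#   wiki_split = datasets.load_metric("wiki_split")
#   results = wiki_split.compute(
#       sources=["About 95 species are currently accepted ."],
#       predictions=["About 95 you now get in ."],
#       references=[["About 95 species are currently known ."]],
#   )
#   # -> {'sari': ..., 'sacrebleu': ..., 'exact': ...}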
| 602
| 1
|
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
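# --- Note (added for illustration) ---
# A dummy object's `_backends` is a list of backend names such as ["torch"], and
# `deps` maps pip package names to version specifiers, e.g.
# {"k-diffusion": "k-diffusion>=0.0.12"} (illustrative values, not the real table).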
| 303
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
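# --- Usage sketch (added for illustration; the checkpoint name is an assumption,
# not taken from this file) ---
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]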
| 303
| 1
|
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
SCREAMING_SNAKE_CASE = 299_792_458
# Symbols
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = symbols("""ct x y z""")
def lowerCamelCase__ ( UpperCAmelCase_ )-> float:
"""simple docstring"""
if velocity > c:
raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("Speed must be greater than or equal to 1!" )
return velocity / c
def lowerCamelCase__ ( UpperCAmelCase_ )-> float:
"""simple docstring"""
return 1 / sqrt(1 - beta(UpperCAmelCase_ ) ** 2 )
def lowerCamelCase__ ( UpperCAmelCase_ )-> np.ndarray:
"""simple docstring"""
return np.array(
[
[gamma(UpperCAmelCase_ ), -gamma(UpperCAmelCase_ ) * beta(UpperCAmelCase_ ), 0, 0],
[-gamma(UpperCAmelCase_ ) * beta(UpperCAmelCase_ ), gamma(UpperCAmelCase_ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ = None )-> np.ndarray:
"""simple docstring"""
# Ensure event is not empty
if event is None:
UpperCamelCase = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(UpperCAmelCase_ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
SCREAMING_SNAKE_CASE = transform(29_979_245)
print("""Example of four vector: """)
print(F'''ct\' = {four_vector[0]}''')
print(F'''x\' = {four_vector[1]}''')
print(F'''y\' = {four_vector[2]}''')
print(F'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
SCREAMING_SNAKE_CASE = {ct: c, x: 1, y: 1, z: 1}
SCREAMING_SNAKE_CASE = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'''\n{numerical_vector}''')
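    # Hand-computed sanity check (added for illustration): for v = 0.5 * c,
    # beta = 0.5 and gamma = 1 / sqrt(1 - 0.25) = 1 / sqrt(0.75) ≈ 1.1547.
    assert abs(gamma(0.5 * c) - 1 / sqrt(0.75)) < 1e-12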
| 554
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
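# --- Usage sketch (added for illustration; the sizes are example values, not
# defaults guaranteed by this file) ---
#   processor = PoolFormerImageProcessor(
#       size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224}
#   )
#   pixel_values = processor(images=pil_image, return_tensors="pt").pixel_values
#   # pixel_values.shape == (1, 3, 224, 224)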
| 554
| 1
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
])
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 432
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
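# --- Note (added for illustration) ---
# The _LazyModule keeps `import transformers.models.xmod` cheap: the torch-backed
# modeling module is only imported when an attribute is first accessed, e.g.
#   from transformers.models import xmod   # does not import modeling_xmod yet
#   model_cls = xmod.XmodModel              # triggers the real import here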
| 432
| 1
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
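# --- Usage sketch (added for illustration; `model` is assumed to be a DPRReader
# model loaded elsewhere, and the input strings are placeholders) ---
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What is love?", titles=["Haddaway"], texts=["'What Is Love' is a song..."],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded)
#   best_spans = tokenizer.decode_best_spans(encoded, outputs, num_spans=1)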
| 227
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("""digital_image_processing/image_data/lena_small.jpg""") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 1_10)).startswith(
            """<PIL.Image.Image image mode=RGB size=100x100 at""")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("""digital_image_processing/image_data/lena_small.jpg""", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path="digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 1_20)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path="digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 4_00, 2_00)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = """digital_image_processing/image_data/lena.jpg"""
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
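# How these are usually run (a sketch, not part of the original file): the
# assert-based functions follow pytest conventions, so from the repository
# root one would typically invoke
#   python -m pytest digital_image_processing/
# assuming opencv-python, numpy and Pillow are installed; the exact test file
# name is an assumption here.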
| 207
| 0
|
'''simple docstring'''
import os
def solution():
    """Find the maximum total from top to bottom of the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, '''triangle.txt''')
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(''' '''):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    # Top-down DP: each cell accumulates the best path sum reaching it.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
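

# A minimal worked example (not part of the original file): the same DP run on
# a small in-memory triangle instead of triangle.txt.
def _demo_solution() -> int:
    a = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])  # 3 + 7 + 4 + 9 = 23 for this triangle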
| 417
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Union[str, Any] = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
__UpperCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
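# Usage sketch (not part of the original file): with _LazyModule in place,
# importing a symbol only loads its submodule on first access, e.g.
#   from transformers import CLIPSegProcessor              # no torch needed
#   from transformers import CLIPSegForImageSegmentation   # imports torch lazily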
| 417
| 1
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : List[Any] =logging.get_logger(__name__)
A_ : Optional[int] ={"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
A_ : List[str] ={
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
A_ : str ={
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
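# A minimal sketch (not part of the original file) of what the two helpers
# above return:
#   len(bytes_to_unicode()) == 256            # byte -> printable-unicode map
#   get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}   # adjacent BPE pairs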
class LongformerTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
    """simple docstring"""
    bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
    eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
    sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
    cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
    unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
    pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
    # Mask token behave like a normal word, i.e. include the space before it
    mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
    super().__init__(
        errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
    with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
        self.encoder = json.load(vocab_handle )
    self.decoder = {v: k for k, v in self.encoder.items()}
    self.errors = errors  # how to handle errors in decoding
    self.byte_encoder = bytes_to_unicode()
    self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
    with open(merges_file , encoding="""utf-8""" ) as merges_handle:
        bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
    bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
    self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
    self.cache = {}
    self.add_prefix_space = add_prefix_space
    # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
    self.pat = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def lowercase__ ( self ):
"""simple docstring"""
return len(self.encoder )
def lowercase__ ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def bpe( self , token ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
a_ = tuple(_UpperCAmelCase )
a_ = get_pairs(_UpperCAmelCase )
if not pairs:
return token
while True:
a_ = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
a_ , a_ = bigram
a_ = []
a_ = 0
while i < len(_UpperCAmelCase ):
try:
a_ = word.index(_UpperCAmelCase , _UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
a_ = j
if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a_ = tuple(_UpperCAmelCase )
a_ = new_word
if len(_UpperCAmelCase ) == 1:
break
else:
a_ = get_pairs(_UpperCAmelCase )
a_ = """ """.join(_UpperCAmelCase )
a_ = word
return word
def lowercase__ ( self , _UpperCAmelCase ):
"""simple docstring"""
a_ = []
for token in re.findall(self.pat , _UpperCAmelCase ):
a_ = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(""" """ ) )
return bpe_tokens
def lowercase__ ( self , _UpperCAmelCase ):
"""simple docstring"""
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowercase__ ( self , _UpperCAmelCase ):
"""simple docstring"""
return self.decoder.get(_UpperCAmelCase )
def lowercase__ ( self , _UpperCAmelCase ):
"""simple docstring"""
a_ = """""".join(_UpperCAmelCase )
a_ = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def lowercase__ ( self , save_directory , filename_prefix = None ):
    """simple docstring"""
    if not os.path.isdir(save_directory ):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
        return
    vocab_file = os.path.join(
        save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
    merge_file = os.path.join(
        save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
    with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
    index = 0
    with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
        writer.write("""#version: 0.2\n""" )
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
            if index != token_index:
                logger.warning(
                    f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                    """ Please check that the tokenizer is not corrupted!""" )
                index = token_index
            writer.write(""" """.join(bpe_tokens ) + """\n""" )
            index += 1
    return vocab_file, merge_file
def lowercase__ ( self , token_ids_0 , token_ids_1 = None ):
    """simple docstring"""
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def lowercase__ ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
    """simple docstring"""
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0 )) + [1]
    return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
def lowercase__ ( self , token_ids_0 , token_ids_1 = None ):
    """simple docstring"""
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep ) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def lowercase__ ( self , text , is_split_into_words=False , **kwargs ):
    """simple docstring"""
    add_prefix_space = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
    if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
        text = """ """ + text
    return (text, kwargs)
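# Usage sketch (not part of the original file): the public equivalent of this
# class is transformers.LongformerTokenizer:
#   from transformers import LongformerTokenizer
#   tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   tok.tokenize("Hello world")        # byte-level BPE pieces, e.g. ['Hello', 'Ġworld']
#   tok("Hello world")["input_ids"]    # ids wrapped with <s> ... </s>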
| 483
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset( IterableDataset ):
"""simple docstring"""
def __init__( self , data ):
    """simple docstring"""
    self.data = data
def __iter__( self ):
"""simple docstring"""
for element in self.data:
yield element
def create_accelerator( even_batches = True ):
    """simple docstring"""
    accelerator = Accelerator(even_batches=even_batches )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader( accelerator , dataset_size , batch_size , iterable = False ):
    """simple docstring"""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )
    dl = DataLoader(dataset , batch_size=batch_size )
    dl = accelerator.prepare(dl )
    return dl
def verify_dataloader_batch_sizes( accelerator , dataset_size , batch_size , process_0_expected_batch_sizes , process_1_expected_batch_sizes , ):
    """simple docstring"""
    dl = create_dataloader(accelerator=accelerator , dataset_size=dataset_size , batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes( ):
"""simple docstring"""
a_ = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
UpperCAmelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
UpperCAmelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def test_can_disable_even_batches( ):
"""simple docstring"""
a_ = create_accelerator(even_batches=UpperCAmelCase__ )
verify_dataloader_batch_sizes(
UpperCAmelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
UpperCAmelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def test_can_join_uneven_inputs( ):
"""simple docstring"""
a_ = create_accelerator(even_batches=UpperCAmelCase__ )
a_ = torch.nn.Linear(1 , 1 )
a_ = accelerator.prepare(UpperCAmelCase__ )
a_ = create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 )
a_ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(UpperCAmelCase__ ):
a_ = ddp_model(batch[0].float() )
a_ = output.sum()
loss.backward()
batch_idxs.append(UpperCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed( accelerator ):
"""simple docstring"""
with warnings.catch_warnings(record=UpperCAmelCase__ ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , UpperCAmelCase__ )
assert "only supported for multi-GPU" in str(w[-1].message )
def test_join_can_override_even_batches( ):
"""simple docstring"""
a_ = True
a_ = False
a_ = create_accelerator(even_batches=UpperCAmelCase__ )
a_ = torch.nn.Linear(1 , 1 )
a_ = accelerator.prepare(UpperCAmelCase__ )
a_ = create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 )
a_ = create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase__ ):
a_ = train_dl.batch_sampler.even_batches
a_ = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders( ):
"""simple docstring"""
a_ = True
a_ = False
a_ = create_accelerator(even_batches=UpperCAmelCase__ )
a_ = torch.nn.Linear(1 , 1 )
a_ = accelerator.prepare(UpperCAmelCase__ )
create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 , iterable=UpperCAmelCase__ )
a_ = create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("""ignore""" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase__ ):
a_ = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches( ):
"""simple docstring"""
a_ = create_accelerator()
a_ = torch.nn.Linear(1 , 1 )
a_ = accelerator.prepare(UpperCAmelCase__ )
create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 , iterable=UpperCAmelCase__ )
with warnings.catch_warnings(record=UpperCAmelCase__ ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase__ ):
pass
assert issubclass(w[-1].category , UpperCAmelCase__ )
assert "only supported for map-style datasets" in str(w[-1].message )
def main( ):
"""simple docstring"""
a_ = create_accelerator()
accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
test_default_ensures_even_batch_sizes()
accelerator.print("""Run tests with even_batches disabled""" )
test_can_disable_even_batches()
accelerator.print("""Test joining uneven inputs""" )
test_can_join_uneven_inputs()
accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
test_join_can_override_even_batches()
accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("""Test join with non DDP distributed raises warning""" )
original_state = accelerator.state.distributed_type
accelerator.state.distributed_type = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(accelerator )
accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
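# These checks compare batch sizes across two processes, so the script is
# meant to be launched distributed, e.g. (the file name is an assumption here):
#   accelerate launch --num_processes 2 test_even_batches.py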
| 483
| 1
|
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
__snake_case = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
__snake_case = logging.get_logger(__name__)
class MaskFormerConfig( PretrainedConfig ):
'''simple docstring'''
A_ : Tuple = 'maskformer'
A_ : int = {'hidden_size': 'mask_feature_size'}
A_ : Optional[Any] = ['resnet', 'swin']
A_ : Optional[Any] = ['detr']
def __init__( self , fpn_feature_size = 256 , mask_feature_size = 256 , no_object_weight = 0.1 , use_auxiliary_loss = False , backbone_config = None , decoder_config = None , init_std = 0.02 , init_xavier_std = 1.0 , dice_weight = 1.0 , cross_entropy_weight = 1.0 , mask_weight = 20.0 , output_auxiliary_logits = None , **kwargs , ) -> Tuple:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
_a = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_a = backbone_config.pop('''model_type''' )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(__UpperCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
F'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
_a = DetrConfig()
else:
# verify that the decoder is supported
_a = (
decoder_config.pop('''model_type''' ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F'Transformer Decoder {decoder_type} not supported, please use one of'
F' {",".join(self.decoders_supported )}' )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_a = CONFIG_MAPPING[decoder_type]
_a = config_class.from_dict(__UpperCAmelCase )
_a = backbone_config
_a = decoder_config
# main feature dimension for the model
_a = fpn_feature_size
_a = mask_feature_size
# initializer
_a = init_std
_a = init_xavier_std
# Hungarian matcher && loss
_a = cross_entropy_weight
_a = dice_weight
_a = mask_weight
_a = use_auxiliary_loss
_a = no_object_weight
_a = output_auxiliary_logits
_a = self.decoder_config.encoder_attention_heads
_a = self.decoder_config.num_hidden_layers
super().__init__(**kwargs )
@classmethod
def _UpperCAmelCase ( cls , backbone_config , decoder_config , **kwargs ) -> List[Any]:
    return cls(
        backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
def _UpperCAmelCase ( self ) -> Dict[str, any]:
_a = copy.deepcopy(self.__dict__ )
_a = self.backbone_config.to_dict()
_a = self.decoder_config.to_dict()
_a = self.__class__.model_type
return output
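# Usage sketch (not part of the original file), assuming the released
# transformers package:
#   from transformers import MaskFormerConfig, SwinConfig, DetrConfig
#   config = MaskFormerConfig()   # Swin backbone + DETR decoder defaults
#   config = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())
#   config.to_dict()["backbone_config"]["model_type"]   # 'swin'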
| 285
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ) -> Optional[int]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_a = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
_a = parent
_a = batch_size
_a = num_channels
_a = min_resolution
_a = max_resolution
_a = do_resize
_a = size
_a = do_normalize
_a = image_mean
_a = image_std
_a = do_rescale
_a = rescale_factor
_a = do_pad
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _UpperCAmelCase ( self , image_inputs , batched=False ) -> List[str]:
if not batched:
_a = image_inputs[0]
if isinstance(__UpperCAmelCase , Image.Image ):
_a , _a = image.size
else:
_a , _a = image.shape[1], image.shape[2]
if w < h:
_a = int(self.size['''shortest_edge'''] * h / w )
_a = self.size['''shortest_edge''']
elif w > h:
_a = self.size['''shortest_edge''']
_a = int(self.size['''shortest_edge'''] * w / h )
else:
_a = self.size['''shortest_edge''']
_a = self.size['''shortest_edge''']
else:
_a = []
for image in image_inputs:
_a , _a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
expected_height = max(expected_values , key=lambda item : item[0] )[0]
expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
A_ : List[str] = DeformableDetrImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> Dict:
_a = DeformableDetrImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Dict:
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_rescale''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) )
def _UpperCAmelCase ( self ) -> Dict:
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
_a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a , _a = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
_a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self ) -> int:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
# prepare image and target
_a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_a = json.loads(f.read() )
_a = {'''image_id''': 39769, '''annotations''': target}
# encode them
_a = DeformableDetrImageProcessor()
_a = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
_a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __UpperCAmelCase )
_a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCAmelCase , atol=1e-4 ) )
# verify area
_a = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCAmelCase ) )
# verify boxes
_a = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCAmelCase )
_a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCAmelCase , atol=1e-3 ) )
# verify image_id
_a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCAmelCase ) )
# verify is_crowd
_a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCAmelCase ) )
# verify class_labels
_a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCAmelCase ) )
# verify orig_size
_a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCAmelCase ) )
# verify size
_a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCAmelCase ) )
@slow
def _UpperCAmelCase ( self ) -> str:
# prepare image, target and masks_path
_a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_a = json.loads(f.read() )
_a = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
_a = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_a = DeformableDetrImageProcessor(format='''coco_panoptic''' )
_a = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
_a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __UpperCAmelCase )
_a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCAmelCase , atol=1e-4 ) )
# verify area
_a = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCAmelCase ) )
# verify boxes
_a = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCAmelCase )
_a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCAmelCase , atol=1e-3 ) )
# verify image_id
_a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCAmelCase ) )
# verify is_crowd
_a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCAmelCase ) )
# verify class_labels
_a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCAmelCase ) )
# verify masks
_a = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __UpperCAmelCase )
# verify orig_size
_a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCAmelCase ) )
# verify size
_a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCAmelCase ) )
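# Usage sketch (not part of the original tests); the checkpoint name is an
# assumption here:
#   from transformers import DeformableDetrImageProcessor
#   from PIL import Image
#   processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   inputs["pixel_values"].shape   # (1, 3, H, W) with the shortest edge resized to 800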
| 285
| 1
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
'''simple docstring'''
def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
lowercase = parent
lowercase = batch_size
lowercase = patch_size
lowercase = max_length
lowercase = num_mel_bins
lowercase = is_training
lowercase = use_labels
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = scope
lowercase = frequency_stride
lowercase = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
lowercase = (self.max_length - self.patch_size) // self.time_stride + 1
lowercase = frequency_out_dimension * time_out_dimension
lowercase = num_patches + 2
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = self.get_config()
return config, input_values, labels
def SCREAMING_SNAKE_CASE__ ( self ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def SCREAMING_SNAKE_CASE__ ( self , config , input_values , labels ):
    model = ASTModel(config=config )
    model.to(torch_device )
    model.eval()
    result = model(input_values )
    self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.prepare_config_and_inputs()
config , input_values , labels = config_and_inputs
lowercase = {'input_values': input_values}
return config, inputs_dict
@require_torch
class ASTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : str = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Optional[int] = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Any = False
_UpperCamelCase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
    if pipeline_test_casse_name == "AudioClassificationPipelineTests":
        return True
    return False
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ASTModelTester(self )
lowercase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(snake_case )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ['input_values']
self.assertListEqual(arg_names[:1] , snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = ASTModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def prepare_audio( ):
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.default_feature_extractor
lowercase = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(snake_case )
lowercase = self.default_feature_extractor
lowercase , lowercase = prepare_audio()
lowercase = audio.squeeze().numpy()
lowercase = feature_extractor(snake_case , sampling_rate=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
lowercase = model(**snake_case )
# verify the logits
lowercase = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , snake_case )
lowercase = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
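# Worked example (a sketch, not part of the original file) of the patch-count
# arithmetic in ASTModelTester above, using its defaults (num_mel_bins=16,
# max_length=24, patch_size=2, frequency_stride=2, time_stride=2):
#   frequency_out_dimension = (16 - 2) // 2 + 1 = 8
#   time_out_dimension      = (24 - 2) // 2 + 1 = 12
#   num_patches             = 8 * 12 = 96
#   seq_length              = 96 + 2 = 98   # +2 for the [CLS] and distillation tokens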
| 84
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
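# Usage sketch (not part of the original file); the checkpoint name is an
# assumption here:
#   from diffusers import VQDiffusionPipeline
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool").images[0]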
| 102
| 0
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor( ProcessorMixin ):
lowerCamelCase__ = '''SpeechT5FeatureExtractor'''
lowerCamelCase__ = '''SpeechT5Tokenizer'''
def __init__( self , feature_extractor , tokenizer ):
    super().__init__(feature_extractor , tokenizer )
def __call__( self , *args , **kwargs ):
    audio = kwargs.pop('''audio''' , None )
    text = kwargs.pop('''text''' , None )
    text_target = kwargs.pop('''text_target''' , None )
    audio_target = kwargs.pop('''audio_target''' , None )
    sampling_rate = kwargs.pop('''sampling_rate''' , None )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase )
elif text is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE : Tuple = None
if audio_target is not None:
__SCREAMING_SNAKE_CASE : Tuple = self.feature_extractor(audio_target=_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = targets['''input_values''']
elif text_target is not None:
__SCREAMING_SNAKE_CASE : str = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = targets['''input_ids''']
else:
__SCREAMING_SNAKE_CASE : List[Any] = None
if inputs is None:
return targets
if targets is not None:
__SCREAMING_SNAKE_CASE : int = labels
__SCREAMING_SNAKE_CASE : Dict = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
__SCREAMING_SNAKE_CASE : Any = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE_ ( self , *args , **kwargs ):
    input_values = kwargs.pop('''input_values''' , None )
    input_ids = kwargs.pop('''input_ids''' , None )
    labels = kwargs.pop('''labels''' , None )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extractor.pad(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
elif input_ids is not None:
__SCREAMING_SNAKE_CASE : int = self.tokenizer.pad(_lowerCamelCase , **_lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE : Any = None
if labels is not None:
if "input_ids" in labels or (isinstance(_lowerCamelCase , _lowerCamelCase ) and "input_ids" in labels[0]):
__SCREAMING_SNAKE_CASE : Any = self.tokenizer.pad(_lowerCamelCase , **_lowerCamelCase )
__SCREAMING_SNAKE_CASE : int = targets['''input_ids''']
else:
__SCREAMING_SNAKE_CASE : Any = self.feature_extractor.feature_size
__SCREAMING_SNAKE_CASE : Any = self.feature_extractor.num_mel_bins
__SCREAMING_SNAKE_CASE : Any = self.feature_extractor.pad(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = feature_size_hack
__SCREAMING_SNAKE_CASE : Any = targets['''input_values''']
else:
__SCREAMING_SNAKE_CASE : Dict = None
if inputs is None:
return targets
if targets is not None:
__SCREAMING_SNAKE_CASE : List[Any] = labels
__SCREAMING_SNAKE_CASE : int = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
__SCREAMING_SNAKE_CASE : List[Any] = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE_ ( self , *args , **kwargs ):
    return self.tokenizer.batch_decode(*args , **kwargs )
def SCREAMING_SNAKE_CASE_ ( self , *args , **kwargs ):
    return self.tokenizer.decode(*args , **kwargs )
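# Usage sketch (not part of the original file); the checkpoint name is an
# assumption here:
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello world", return_tensors="pt")   # tokenizer path
#   # passing audio=... routes to the feature extractor; passing both raises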
| 710
|
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for whichever of the four quantities is given as 0."""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if distance < 0:
        raise ValueError('''Distance cannot be negative''')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
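# Worked example (not part of the original file): two 1 C charges 1 m apart.
#   coulombs_law(force=0, charge1=1, charge2=1, distance=1)
#   -> {'force': 8988000000.0}   i.e. COULOMBS_CONSTANT * (1 * 1) / 1**2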
| 401
| 0
|