"""BLIP-2 model configuration."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        # get_config_dict returns both the config dict and the remaining kwargs
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
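# Usage sketch (an illustration added here, not part of the original module):
# composing a Blip2Config from hand-built sub-configs. The OPTConfig import is
# an assumption made for the demo; any text config from CONFIG_MAPPING works.
#
#     from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
#
#     config = Blip2Config.from_vision_qformer_text_configs(
#         vision_config=Blip2VisionConfig(),
#         qformer_config=Blip2QFormerConfig(),
#         text_config=OPTConfig(),
#     )
#     assert config.to_dict()["model_type"] == "blip-2"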
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
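# Minimal sketch of the lazy-import pattern used above (an illustration, not
# part of the module): PEP 562's module-level __getattr__ resolves attributes
# on first access, which is what _LazyModule does in a more featureful way.
#
#     import importlib
#
#     _LAZY = {"EncoderDecoderConfig": ".configuration_encoder_decoder"}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             module = importlib.import_module(_LAZY[name], __package__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")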
"""simple docstring"""
from __future__ import annotations
def lowercase ( _snake_case : tuple[int, int] , _snake_case : int ) ->list[tuple[int, int]]:
"""simple docstring"""
__snake_case , __snake_case : List[str] = position
__snake_case : Tuple = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
__snake_case : Dict = []
for position in positions:
__snake_case , __snake_case : List[str] = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(_snake_case )
return permissible_positions
def lowercase ( _snake_case : list[list[int]] ) ->bool:
"""simple docstring"""
return not any(elem == 0 for row in board for elem in row )
def lowercase ( _snake_case : list[list[int]] , _snake_case : tuple[int, int] , _snake_case : int ) ->bool:
"""simple docstring"""
if is_complete(_snake_case ):
return True
for position in get_valid_pos(_snake_case , len(_snake_case ) ):
__snake_case , __snake_case : Dict = position
if board[y][x] == 0:
__snake_case : List[Any] = curr + 1
if open_knight_tour_helper(_snake_case , _snake_case , curr + 1 ):
return True
__snake_case : Dict = 0
return False
def lowercase ( _snake_case : int ) ->list[list[int]]:
"""simple docstring"""
__snake_case : int = [[0 for i in range(_snake_case )] for j in range(_snake_case )]
for i in range(_snake_case ):
for j in range(_snake_case ):
__snake_case : Union[str, Any] = 1
if open_knight_tour_helper(_snake_case , (i, j) , 1 ):
return board
__snake_case : str = 0
__snake_case : Dict = f"""Open Kight Tour cannot be performed on a board of size {n}"""
raise ValueError(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
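# Usage sketch (an illustrative addition, not part of the original file): an
# open knight's tour exists on a 5x5 board, so this demo finishes quickly.
if __name__ == "__main__":
    for tour_row in open_knight_tour(5):
        print(tour_row)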
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=None , a_=None , a_=None , a_="resnet50" , a_=3 , a_=32 , a_=3 , a_=True , a_=True , ):
'''simple docstring'''
__snake_case : List[Any] = parent
__snake_case : Tuple = out_indices if out_indices is not None else [4]
__snake_case : Optional[Any] = stage_names
__snake_case : str = out_features
__snake_case : List[str] = backbone
__snake_case : Optional[int] = batch_size
__snake_case : Optional[int] = image_size
__snake_case : str = num_channels
__snake_case : Optional[int] = use_pretrained_backbone
__snake_case : Optional[int] = is_training
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Optional[int] = self.get_config()
return config, pixel_values
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = TimmBackbone(config=a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
__snake_case : int = model(a_ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case : List[Any] = config_and_inputs
__snake_case : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(TimmBackbone,) if is_torch_available() else ()
lowerCamelCase__ ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = TimmBackboneModelTester(self )
__snake_case : Dict = ConfigTester(self , config_class=a_ , has_text_modality=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = '''resnet18'''
__snake_case : Tuple = '''microsoft/resnet-18'''
__snake_case : Dict = AutoBackbone.from_pretrained(a_ , use_timm_backbone=a_ )
__snake_case : Tuple = AutoBackbone.from_pretrained(a_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__snake_case : Optional[Any] = AutoBackbone.from_pretrained(a_ , use_timm_backbone=a_ , out_indices=[1, 2, 3] )
__snake_case : Optional[int] = AutoBackbone.from_pretrained(a_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Any = model_class(a_ )
__snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Tuple = [*signature.parameters.keys()]
__snake_case : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[Any] = True
__snake_case : List[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
__snake_case : Dict = self.all_model_classes[0]
__snake_case : Optional[int] = model_class(a_ )
model.to(a_ )
__snake_case : int = self._prepare_for_class(a_ , a_ )
__snake_case : Optional[Any] = model(**a_ )
__snake_case : int = outputs[0][-1]
# Encoder-/Decoder-only models
__snake_case : int = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__snake_case : int = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=a_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(a_ )
model.to(a_ )
model.eval()
__snake_case : List[str] = model(**a_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__snake_case : Optional[Any] = copy.deepcopy(a_ )
__snake_case : str = None
__snake_case : int = model_class(a_ )
model.to(a_ )
model.eval()
__snake_case : Any = model(**a_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__snake_case : Union[str, Any] = copy.deepcopy(a_ )
__snake_case : int = False
__snake_case : List[str] = model_class(a_ )
model.to(a_ )
model.eval()
__snake_case : Optional[Any] = model(**a_ )
import unittest

from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MPNetForMaskedLM,
        MPNetForMultipleChoice,
        MPNetForQuestionAnswering,
        MPNetForSequenceClassification,
        MPNetForTokenClassification,
        MPNetModel,
    )


class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: finds the longest palindromic substring in linear time.
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes, update r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
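# Usage sketch (an illustrative addition): the longest palindromic substring of
# "abbbaba" is "abbba"; Manacher's reuse of mirrored palindrome lengths keeps
# the whole scan linear in the length of the input.
if __name__ == "__main__":
    assert palindromic_string("abbbaba") == "abbba"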
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    """Convert a TAPAS TensorFlow checkpoint to a PyTorch model for the given task."""
    # initialize configuration from the json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
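# Example invocation (illustrative; the script filename and all paths are
# placeholders, while the flags are exactly those parsed above):
#
#     python convert_tapas_checkpoint.py \
#         --task WTQ \
#         --reset_position_index_per_cell \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --tapas_config_file /path/to/tapas_config.json \
#         --pytorch_dump_path /path/to/dump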
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan number sequence from C(0) through C(upper_limit)."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
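# Quick check (an illustrative addition): the sequence starts 1, 1, 2, 5, 14, 42.
if __name__ == "__main__":
    assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]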
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _lowerCamelCase ( UpperCAmelCase_ : List[str] ) -> Tuple:
"""simple docstring"""
return EnvironmentCommand()
class UpperCamelCase__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@staticmethod
def snake_case__ ( SCREAMING_SNAKE_CASE__ ) -> Dict:
A__ = parser.add_parser("env" )
download_parser.set_defaults(func=lowercase__ )
def snake_case__ ( self ) -> List[str]:
A__ = huggingface_hub.__version__
A__ = 'not installed'
A__ = 'NA'
if is_torch_available():
import torch
A__ = torch.__version__
A__ = torch.cuda.is_available()
A__ = 'not installed'
if is_transformers_available():
import transformers
A__ = transformers.__version__
A__ = 'not installed'
if is_accelerate_available():
import accelerate
A__ = accelerate.__version__
A__ = 'not installed'
if is_xformers_available():
import xformers
A__ = xformers.__version__
A__ = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': f"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(lowercase__ ) )
return info
@staticmethod
def snake_case__ ( SCREAMING_SNAKE_CASE__ ) -> Dict:
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
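# Usage sketch (illustrative): once registered, the command runs from a shell:
#
#     diffusers-cli env
#
# and prints the environment dictionary above, ready to paste into a GitHub issue.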
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter


description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Creates and saves a basic cluster config to be used on a local machine, possibly with multiple devices."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False

    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )

    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"

    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
"""Boruvka's algorithm to find a minimum spanning tree of a weighted, undirected graph."""
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first node, second node, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Finds the root of the component that a given node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Re-labels every node with the root of its component."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merges two components, attaching the smaller one to the larger."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Boruvka's algorithm, printing each MST edge as it is added."""
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    # Track the cheapest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """
    >>> g = Graph(8)
    >>> for u_v_w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
    ...     g.add_edge(*u_v_w)
    """
if __name__ == "__main__":
import doctest
doctest.testmod()
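# Usage sketch (an illustrative addition, not from the original file): running
# Boruvka's algorithm on a small weighted graph prints each edge as it joins
# two components, then the total MST weight (19 for this graph).
if __name__ == "__main__":
    demo_graph = Graph(4)
    for demo_u, demo_v, demo_w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
        demo_graph.add_edge(demo_u, demo_v, demo_w)
    demo_graph.boruvka()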
"""Patience sort: deal the input into increasing piles, then merge the piles."""
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sorts the given collection in place using patience sort and returns it."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
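# Worked example (an illustrative addition): dealing [1, 9, 5, 21, 17, 6] into
# piles and heap-merging them yields the sorted list.
if __name__ == "__main__":
    assert patience_sort([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]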
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, rounded to digit_amount digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date using the Doomsday algorithm."""
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # a year is not a leap year if it is not divisible by 4, or if it is a
    # century year (centurian == 0) not divisible by 400
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
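# Usage sketch (an illustrative addition): 2020-10-24 fell on a Saturday.
if __name__ == "__main__":
    assert get_week_day(2020, 10, 24) == "Saturday"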
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = image.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(image, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
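# Usage sketch for the pipeline above. The checkpoint id is a real
# unconditional DDPM model, but the wiring and parameter values are
# illustrative, not part of this file:
#
#   from diffusers import DDPMPipeline
#
#   base = DDPMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   pipe = DDIMNoiseComparativeAnalysisPipeline(unet=base.unet, scheduler=base.scheduler)
#   out_image, timestep = pipe(image=pil_image, strength=0.5, num_inference_steps=50, return_dict=False)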
|
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A__ : Any = logging.getLogger(__name__)
def _lowerCAmelCase ( _UpperCamelCase=2 , _UpperCamelCase=3 , _UpperCamelCase=16 , _UpperCamelCase = 10 , _UpperCamelCase = 2 ):
"""simple docstring"""
def get_dataset(_UpperCamelCase ):
_lowercase: Tuple = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(_UpperCamelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_lowercase: Tuple = get_dataset(_UpperCamelCase )
_lowercase: Dict = get_dataset(_UpperCamelCase )
_lowercase: Optional[int] = DataLoader(_UpperCamelCase , shuffle=_UpperCamelCase , batch_size=_UpperCamelCase , num_workers=4 )
_lowercase: Optional[int] = DataLoader(_UpperCamelCase , shuffle=_UpperCamelCase , batch_size=_UpperCamelCase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
_lowercase: Tuple = []
for epoch in range(_UpperCamelCase ):
# Train quickly
model.train()
for batch in dataloader:
_lowercase , _lowercase: Optional[Any] = batch
_lowercase: Union[str, Any] = model(_UpperCamelCase )
_lowercase: Optional[Any] = torch.nn.functional.mse_loss(_UpperCamelCase , _UpperCamelCase )
accelerator.backward(_UpperCamelCase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __magic_name__ ( nn.Module ):
def __init__( self ) -> str:
"""simple docstring"""
super().__init__()
_lowercase: Optional[Any] = nn.Parameter(torch.randn(1 ) )
_lowercase: int = nn.Parameter(torch.randn(1 ) )
def lowercase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)

            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)

            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; only the last `total_limit` should survive:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
A__ : Dict = '/tmp/accelerate/state_checkpointing'
A__ : Union[str, Any] = DummyModel()
A__ : str = torch.optim.Adam(params=model.parameters(), lr=1e-3)
A__ : Union[str, Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A__ , A__ : Optional[int] = dummy_dataloaders()
A__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A__ : List[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A__ , A__ : str = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A__ : Tuple = group['params'][0].device
break
assert param_device.type == accelerator.device.type
A__ : Tuple = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
A__ : int = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
A__ : List[Any] = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
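# The checkpointing pattern this script exercises, in isolation (a sketch; the
# directory layout shown is what automatic checkpoint naming produces):
#
#   accelerator = Accelerator(project_dir=savedir, project_config=ProjectConfiguration(automatic_checkpoint_naming=True))
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   accelerator.save_state()  # -> <savedir>/checkpoints/checkpoint_0
#   accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"))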
|
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
A__ : int = threading.Lock()
A__ : Optional[logging.Handler] = None
A__ : str = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
A__ : str = logging.WARNING
A__ : Union[str, Any] = True
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: List[Any] = os.getenv('''TRANSFORMERS_VERBOSITY''' , _UpperCamelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ", ".join(log_levels.keys() ) }''' )
return _default_log_level
def _lowerCAmelCase ( ):
"""simple docstring"""
return __name__.split('''.''' )[0]
def _lowerCAmelCase ( ):
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def _lowerCAmelCase ( ):
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_lowercase: int = logging.StreamHandler() # Set sys.stderr as stream.
_lowercase: Dict = sys.stderr.flush
# Apply our default configuration to the library root logger.
_lowercase: Dict = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_lowercase: Optional[Any] = False
def _lowerCAmelCase ( ):
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_lowercase: Tuple = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_lowercase: Dict = None
def _lowerCAmelCase ( ):
"""simple docstring"""
return log_levels
def _lowerCAmelCase ( _UpperCamelCase = None ):
"""simple docstring"""
if name is None:
_lowercase: Tuple = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
return set_verbosity(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
return set_verbosity(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
return set_verbosity(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
return set_verbosity(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
_lowercase: str = False
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
_lowercase: List[str] = True
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Any = _get_library_root_logger().handlers
for handler in handlers:
_lowercase: int = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
handler.setFormatter(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: List[str] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(_UpperCamelCase )
def _lowerCAmelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
_lowercase: Any = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , _UpperCamelCase )
if no_advisory_warnings:
return
self.warning(*_UpperCamelCase , **_UpperCamelCase )
A__ : Optional[int] = warning_advice
@functools.lru_cache(_UpperCamelCase )
def _lowerCAmelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
self.warning(*_UpperCamelCase , **_UpperCamelCase )
A__ : List[Any] = warning_once
class __magic_name__ :
def __init__( self , *A_ , **A_ ) -> Any: # pylint: disable=unused-argument
"""simple docstring"""
_lowercase: Tuple = args[0] if args else None
def __iter__( self ) -> Union[str, Any]:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self , A_ ) -> List[Any]:
"""simple docstring"""
def empty_fn(*A_ , **A_ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> Tuple:
"""simple docstring"""
return self
def __exit__( self , A_ , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
return
class __magic_name__ :
def __call__( self , *A_ , **A_ ) -> Dict:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm(*A_ , **A_ )
else:
return EmptyTqdm(*A_ , **A_ )
def lowercase_ ( self , *A_ , **A_ ) -> List[str]:
"""simple docstring"""
_lowercase: Optional[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*A_ , **A_ )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A__ : str = _tqdm_cls()
def _lowerCAmelCase ( ):
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def _lowerCAmelCase ( ):
"""simple docstring"""
global _tqdm_active
_lowercase: str = True
hf_hub_utils.enable_progress_bars()
def _lowerCAmelCase ( ):
"""simple docstring"""
global _tqdm_active
_lowercase: Union[str, Any] = False
hf_hub_utils.disable_progress_bars()
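# Typical usage of this module from library code (a sketch; the logger name
# and messages are illustrative):
#
#   from transformers.utils import logging
#
#   logging.set_verbosity_info()
#   logger = logging.get_logger("transformers")
#   logger.info("INFO")
#   logger.warning_advice("Hidden when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")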
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
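# What the lazy indirection buys (a sketch): names are resolved on first
# attribute access, so torch-heavy modules load only when needed. Illustrative,
# assuming the usual transformers package layout:
#
#   from transformers.models.nezha import NezhaConfig  # cheap, config only
#   from transformers.models.nezha import NezhaModel   # now modeling_nezha is imported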
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case__ : Optional[Any] = {"""input_ids""": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = snake_case__  # alias for the dict assigned above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
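# For reference, the special-token layout asserted in test_sequence_builders
# follows the BERT convention (sketch):
#   single sequence A:   [CLS] A [SEP]
#   pair A, B:           [CLS] A [SEP] B [SEP]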
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
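# Sketch of pairing the encoder and decoder tested above in an
# EncoderDecoderModel (the checkpoint id is real; the wiring is illustrative
# and not part of this test file):
#
#   from transformers import BertGenerationDecoder, BertGenerationEncoder, EncoderDecoderModel
#
#   encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   decoder = BertGenerationDecoder.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder", add_cross_attention=True, is_decoder=True
#   )
#   model = EncoderDecoderModel(encoder=encoder, decoder=decoder)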
|
"""Greedy activity selection: print a maximum set of non-overlapping activities."""


def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedily select every activity whose start time is at least the finish
    time of the previously selected one. Assumes ``finish`` is sorted.
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
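# The greedy step above relies on activities being pre-sorted by finish time;
# a sketch of a wrapper that handles unsorted input (illustrative, not part of
# the original module):
#
#   def print_max_activities_unsorted(start: list[int], finish: list[int]) -> None:
#       order = sorted(range(len(finish)), key=lambda k: finish[k])
#       print_max_activities([start[k] for k in order], [finish[k] for k in order])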
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
|
"""Sequence feature extraction class for common feature extractors to preprocess sequences."""
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Base class for speech/audio feature extractors that pad sequence inputs."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        """Pad (and optionally truncate) a batch of extracted features to a common length."""
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Find the correct padding strategy."""

        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
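# Sketch of the collate_fn use-case mentioned in `pad` above (the checkpoint id
# is a real wav2vec2 model; the feature values are illustrative):
#
#   from transformers import Wav2Vec2FeatureExtractor
#
#   extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   batch = extractor.pad(
#       [{"input_values": [0.1, 0.2]}, {"input_values": [0.3]}],
#       padding=True,
#       return_tensors="np",
#   )
#   # batch["input_values"] has shape (2, 2); the short example is padded with extractor.padding_value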
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_a : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(a )
class lowercase_ ( a ):
'''simple docstring'''
def __init__( self , *a_ , **a_ ) -> str:
"""simple docstring"""
super().__init__(*a_ , **a_ )
requires_backends(self , 'decord' )
self.check_model_type(a_ )
def snake_case_ ( self , a_=None , a_=None , a_=None ) -> int:
"""simple docstring"""
UpperCAmelCase = {}
if frame_sampling_rate is not None:
UpperCAmelCase = frame_sampling_rate
if num_frames is not None:
UpperCAmelCase = num_frames
UpperCAmelCase = {}
if top_k is not None:
UpperCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , a_ , **a_ ) -> Union[str, Any]:
"""simple docstring"""
return super().__call__(a_ , **a_ )
def snake_case_ ( self , a_ , a_=None , a_=1 ) -> Tuple:
"""simple docstring"""
if num_frames is None:
UpperCAmelCase = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
UpperCAmelCase = BytesIO(requests.get(a_ ).content )
UpperCAmelCase = VideoReader(a_ )
videoreader.seek(0 )
UpperCAmelCase = 0
UpperCAmelCase = num_frames * frame_sampling_rate - 1
UpperCAmelCase = np.linspace(a_ , a_ , num=a_ , dtype=np.int64 )
UpperCAmelCase = videoreader.get_batch(a_ ).asnumpy()
UpperCAmelCase = list(a_ )
UpperCAmelCase = self.image_processor(a_ , return_tensors=self.framework )
return model_inputs
def snake_case_ ( self , a_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model(**a_ )
return model_outputs
def snake_case_ ( self , a_ , a_=5 ) -> Union[str, Any]:
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase = model_outputs.logits.softmax(-1 )[0]
UpperCAmelCase , UpperCAmelCase = probs.topk(a_ )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
UpperCAmelCase = scores.tolist()
UpperCAmelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(a_ , a_ )]
| 447
| 0
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction ( train_dt , train_usr , train_mtch , test_dt , test_mtch ) -> float:
'''simple docstring'''
x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
y = np.array(train_usr )
beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , x ) ) , x.transpose() ) , y )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
def sarimax_predictor ( train_user , train_match , test_match ) -> float:
'''simple docstring'''
order = (1, 2, 1)
seasonal_order = (1, 1, 0, 7)
model = SARIMAX(
train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
model_fit = model.fit(disp=False , maxiter=600 , method="nm" )
result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
return result[0]
def support_vector_regressor ( x_train , x_test , train_user ) -> float:
'''simple docstring'''
regressor = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(x_train , train_user )
y_pred = regressor.predict(x_test )
return y_pred[0]
def interquartile_range_checker ( train_user ) -> float:
'''simple docstring'''
train_user.sort()
q1 = np.percentile(train_user , 25 )
q3 = np.percentile(train_user , 75 )
iqr = q3 - q1
low_lim = q1 - (iqr * 0.1)
return low_lim
def data_safety_checker ( list_vote , actual_result ) -> bool:
'''simple docstring'''
safe = 0
not_safe = 0
for i in list_vote:
if i > actual_result:
not_safe = not_safe + 1
else:
if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
data_input_df = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
normalize_df = Normalizer().fit_transform(data_input_df.values)
# split data
total_date = normalize_df[:, 2].tolist()
total_user = normalize_df[:, 0].tolist()
total_match = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
x = normalize_df[:, [1, 2]].tolist()
x_train = x[: len(x) - 1]
x_test = x[len(x) - 1 :]
# for linear regression & sarimax
trn_date = total_date[: len(total_date) - 1]
trn_user = total_user[: len(total_user) - 1]
trn_match = total_match[: len(total_match) - 1]
tst_date = total_date[len(total_date) - 1 :]
tst_user = total_user[len(total_user) - 1 :]
tst_match = total_match[len(total_match) - 1 :]
# voting system with forecasting
res_vote = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
print(F"Today's data is {not_str}safe.")
| 700
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a : Union[str, Any] = logging.get_logger(__name__)
def __magic_name__ ( lowercase_ ) -> Dict:
'''simple docstring'''
UpperCamelCase = torch.load(lowercase_ , map_location="cpu" )
if "model" in sd.keys():
UpperCamelCase = torch.load(lowercase_ , map_location="cpu" )["model"]
# pop unnecessary weights
UpperCamelCase = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase_ )
UpperCamelCase = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCamelCase = sd.pop(lowercase_ )
UpperCamelCase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
value = sd[key]
# We split QKV in separate Q,K,V
q_name = key.replace(".qkv_proj." , ".q_proj." )
k_name = key.replace(".qkv_proj." , ".k_proj." )
v_name = key.replace(".qkv_proj." , ".v_proj." )
depth = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` stores the QKV weight split as K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
k , v , q = torch.split(value , depth // 3 , dim=0 )
sd[q_name] = q
sd[k_name] = k
sd[v_name] = v
del sd[key]
return sd
@torch.no_grad()
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_=None ) -> str:
'''simple docstring'''
UpperCamelCase = load_checkpoint(lowercase_ )
if config is not None:
UpperCamelCase = OPTConfig.from_pretrained(lowercase_ )
else:
UpperCamelCase = OPTConfig()
UpperCamelCase = OPTModel(lowercase_ ).half().eval()
model.load_state_dict(lowercase_ )
# Check results
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
__a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
__a : Dict = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
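# Example invocation (illustrative paths; the script name below is an assumption,
# and the checkpoint must already be in the metaseq format described above):
# python convert_opt_checkpoint.py \
#     --fairseq_path ./opt-125m/restored.pt --pytorch_dump_folder_path ./opt-125m-hf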
| 414
| 0
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __lowercase :
def __init__( self , lowercase_ , lowercase_ = 1_3 , lowercase_ = 6_4 , lowercase_ = 2 , lowercase_ = 3 , lowercase_ = 3 , lowercase_ = True , lowercase_ = True , lowercase_ = 1_2_8 , lowercase_=[1_6, 3_2, 6_4, 1_2_8] , lowercase_ = 7 , lowercase_ = 4 , lowercase_ = 3_7 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 1_0 , lowercase_ = 0.02 , lowercase_ = 2 , lowercase_ = 1 , lowercase_ = 1_2_8 , lowercase_ = [2, 2, 2, 2] , lowercase_ = 2 , lowercase_ = 2 , ) -> Dict:
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = encoder_stride
__snake_case = num_attention_outputs
__snake_case = embed_dim
__snake_case = embed_dim + 1
__snake_case = resolution
__snake_case = depths
__snake_case = hidden_sizes
__snake_case = dim
__snake_case = mlp_expansion_ratio
def _a ( self) -> Tuple:
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = self.get_config()
return config, pixel_values, labels
def _a ( self) -> str:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> List[str]:
__snake_case = TFEfficientFormerModel(config=lowercase_)
__snake_case = model(lowercase_ , training=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> Tuple:
__snake_case = self.type_sequence_label_size
__snake_case = TFEfficientFormerForImageClassification(lowercase_)
__snake_case = model(lowercase_ , labels=lowercase_ , training=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__snake_case = 1
__snake_case = TFEfficientFormerForImageClassification(lowercase_)
__snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__snake_case = model(lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _a ( self) -> Optional[Any]:
__snake_case = self.prepare_config_and_inputs()
__snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __lowercase ( _A , _A , unittest.TestCase ):
__UpperCAmelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> Dict:
__snake_case = TFEfficientFormerModelTester(self)
__snake_case = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=3_7)
def _a ( self) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds')
def _a ( self) -> int:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings')
def _a ( self) -> str:
pass
def _a ( self) -> List[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(lowercase_)
__snake_case = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_)
def _a ( self) -> Any:
def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_):
__snake_case = model_class(lowercase_)
__snake_case = model(**self._prepare_for_class(lowercase_ , lowercase_) , training=lowercase_)
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(lowercase_) , lowercase_)
if hasattr(self.model_tester , 'encoder_seq_length'):
__snake_case = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1:
__snake_case = seq_length * self.model_tester.chunk_length
else:
__snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__snake_case = outputs.decoder_hidden_states
self.assertIsInstance(lowercase_ , (list, tuple))
self.assertEqual(len(lowercase_) , lowercase_)
__snake_case = getattr(self.model_tester , 'seq_length' , lowercase_)
__snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowercase_)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_=False) -> int:
__snake_case = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self) -> Union[str, Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
def _a ( self) -> Optional[int]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_)
def _a ( self) -> Any:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
@slow
def _a ( self) -> str:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = TFEfficientFormerModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def _a ( self) -> Dict:
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
__snake_case = getattr(self.model_tester , 'seq_length' , lowercase_)
__snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowercase_)
__snake_case = getattr(self.model_tester , 'key_length' , lowercase_)
__snake_case = getattr(self.model_tester , 'chunk_length' , lowercase_)
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'):
__snake_case = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__snake_case = True
__snake_case = False
__snake_case = True
__snake_case = model_class(lowercase_)
__snake_case = model(**self._prepare_for_class(lowercase_ , lowercase_) , training=lowercase_)
__snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase_) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case = True
__snake_case = model_class(lowercase_)
__snake_case = model(**self._prepare_for_class(lowercase_ , lowercase_) , training=lowercase_)
__snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase_) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self) -> Optional[int]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__snake_case = model_class(lowercase_)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__snake_case = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=key)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__snake_case = model(lowercase_)
self.assertTrue(outputs_dict is not None)
def A ( ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def _a ( self) -> int:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
if is_vision_available()
else None
)
@slow
def _a ( self) -> List[Any]:
__snake_case = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=lowercase_ , return_tensors='tf')
# forward pass
__snake_case = model(**lowercase_ , training=lowercase_)
# verify the logits
__snake_case = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , lowercase_)
__snake_case = tf.constant([-0.0555, 0.4825, -0.0852])
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
@slow
def _a ( self) -> List[str]:
__snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300')
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=lowercase_ , return_tensors='tf')
# forward pass
__snake_case = model(**lowercase_ , training=lowercase_)
# verify the logits
__snake_case = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , lowercase_)
__snake_case = tf.constant([-0.1312, 0.4353, -1.0499])
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
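# Hedged sketch (not part of the original tests): the integration tests above
# reduce to this inference pattern, reusing the checkpoint id they load:
# processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
# model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
# inputs = processor(images=prepare_img(), return_tensors="tf")
# logits = model(**inputs, training=False).logits  # shape (1, 1000)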
| 313
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A : str = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 231
| 0
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=64 , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Optional[int]:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = embedding_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
__lowercase = MobileBertModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
__lowercase = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
__lowercase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
__lowercase = MobileBertForMaskedLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = MobileBertForNextSentencePrediction(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
__lowercase = MobileBertForPreTraining(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , next_sentence_label=lowerCAmelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
'''simple docstring'''
__lowercase = MobileBertForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MobileBertForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MobileBertForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
__lowercase = self.num_choices
__lowercase = MobileBertForMultipleChoice(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
__a : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
__a : Tuple = (
{
'''feature-extraction''': MobileBertModel,
'''fill-mask''': MobileBertForMaskedLM,
'''question-answering''': MobileBertForQuestionAnswering,
'''text-classification''': MobileBertForSequenceClassification,
'''token-classification''': MobileBertForTokenClassification,
'''zero-shot''': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Tuple = True
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[Any]:
'''simple docstring'''
__lowercase = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase__ )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = MobileBertModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCAmelCase__ )
def _long_tensor ( tok_lst ):
return torch.tensor(tok_lst , dtype=torch.long , device=torch_device )
__a : List[str] = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(lowerCAmelCase__ )
__lowercase = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__lowercase = model(lowerCAmelCase__ )[0]
__lowercase = torch.Size((1, 9, 5_12) )
self.assertEqual(output.shape , lowerCAmelCase__ )
__lowercase = torch.tensor(
[
[
[-2.473_6526E07, 8.269_1656E04, 1.652_1838E05],
[-5.754_1704E-01, 3.905_6022E00, 4.401_1507E00],
[2.604_7359E00, 1.567_7652E00, -1.732_4188E-01],
]
] , device=lowerCAmelCase__ , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
__lowercase = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__lowercase = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
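# Hedged sketch (not part of the original tests): the slow test above boils down
# to running the pretrained encoder on a batch of token ids:
# model = MobileBertModel.from_pretrained("google/mobilebert-uncased")
# input_ids = torch.tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
# with torch.no_grad():
#     output = model(input_ids)[0]  # shape (1, 9, 512)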
| 705
|
from __future__ import annotations
def ceil_index ( v , l , r , key ): # noqa: E741
"""simple docstring"""
while r - l > 1:
m = (l + r) // 2
if v[m] >= key:
r = m
else:
l = m # noqa: E741
return r
def longest_increasing_subsequence_length ( v ):
"""simple docstring"""
if len(v ) == 0:
return 0
tail = [0] * len(v )
length = 1
tail[0] = v[0]
for i in range(1 , len(v ) ):
if v[i] < tail[0]:
tail[0] = v[i]
elif v[i] > tail[length - 1]:
tail[length] = v[i]
length += 1
else:
tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
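# Illustrative check (not in the original file): the O(n log n) routine above
# returns the length of the longest strictly increasing subsequence.
# print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6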
| 522
| 0
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Collection[float] | None = None ) -> None:
if components is None:
__lowerCamelCase = []
__lowerCamelCase = list(SCREAMING_SNAKE_CASE__ )
def __len__( self : Union[str, Any] ) -> int:
return len(self.__components )
def __str__( self : Dict ) -> str:
return "(" + ",".join(map(SCREAMING_SNAKE_CASE__ , self.__components ) ) + ")"
def __add__( self : str , SCREAMING_SNAKE_CASE__ : Vector ) -> Vector:
__lowerCamelCase = len(self )
if size == len(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = [self.__components[i] + other.component(SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
else:
raise Exception('''must have the same size''' )
def __sub__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Vector ) -> Vector:
__lowerCamelCase = len(self )
if size == len(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = [self.__components[i] - other.component(SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
else: # error case
raise Exception('''must have the same size''' )
@overload
def __mul__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : float ) -> Vector:
...
@overload
def __mul__( self : int , SCREAMING_SNAKE_CASE__ : Vector ) -> float:
...
def __mul__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : float | Vector ) -> float | Vector:
if isinstance(SCREAMING_SNAKE_CASE__ , (float, int) ):
__lowerCamelCase = [c * other for c in self.__components]
return Vector(SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(self ) == len(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = len(self )
__lowerCamelCase = [self.__components[i] * other.component(SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ )]
return sum(SCREAMING_SNAKE_CASE__ )
else: # error case
raise Exception('''invalid operand!''' )
def __A ( self : List[str] ) -> Vector:
return Vector(self.__components )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> float:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('''index out of range''' )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__lowerCamelCase = value
def __A ( self : Optional[Any] ) -> float:
if len(self.__components ) == 0:
raise Exception('''Vector is empty''' )
__lowerCamelCase = [c**2 for c in self.__components]
return math.sqrt(sum(SCREAMING_SNAKE_CASE__ ) )
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : bool = False ) -> float:
__lowerCamelCase = self * other
__lowerCamelCase = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def __magic_name__ ( __lowerCAmelCase : int ) -> Vector:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
return Vector([0] * dimension )
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> Vector:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (isinstance(__lowerCAmelCase , __lowerCAmelCase ))
__lowerCamelCase = [0] * dimension
__lowerCamelCase = 1
return Vector(__lowerCAmelCase )
def __magic_name__ ( __lowerCAmelCase : float , __lowerCAmelCase : Vector , __lowerCAmelCase : Vector ) -> Vector:
assert (
isinstance(__lowerCAmelCase , __lowerCAmelCase )
and isinstance(__lowerCAmelCase , __lowerCAmelCase )
and (isinstance(__lowerCAmelCase , (int, float) ))
)
return x * scalar + y
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> Vector:
random.seed(__lowerCAmelCase )
__lowerCamelCase = [random.randint(__lowerCAmelCase , __lowerCAmelCase ) for _ in range(__lowerCAmelCase )]
return Vector(__lowerCAmelCase )
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : list[list[float]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> None:
__lowerCamelCase = matrix
__lowerCamelCase = w
__lowerCamelCase = h
def __str__( self : Tuple ) -> str:
__lowerCamelCase = ''''''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Tuple , SCREAMING_SNAKE_CASE__ : Matrix ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__lowerCamelCase = []
for i in range(self.__height ):
__lowerCamelCase = [
self.__matrix[i][j] + other.component(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for j in range(self.__width )
]
matrix.append(SCREAMING_SNAKE_CASE__ )
return Matrix(SCREAMING_SNAKE_CASE__ , self.__width , self.__height )
else:
raise Exception('''matrix must have the same dimension!''' )
def __sub__( self : Dict , SCREAMING_SNAKE_CASE__ : Matrix ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__lowerCamelCase = []
for i in range(self.__height ):
__lowerCamelCase = [
self.__matrix[i][j] - other.component(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for j in range(self.__width )
]
matrix.append(SCREAMING_SNAKE_CASE__ )
return Matrix(SCREAMING_SNAKE_CASE__ , self.__width , self.__height )
else:
raise Exception('''matrices must have the same dimension!''' )
@overload
def __mul__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : float ) -> Matrix:
...
@overload
def __mul__( self : int , SCREAMING_SNAKE_CASE__ : Vector ) -> Vector:
...
def __mul__( self : List[str] , SCREAMING_SNAKE_CASE__ : float | Vector ) -> Vector | Matrix:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # matrix-vector
if len(SCREAMING_SNAKE_CASE__ ) == self.__width:
__lowerCamelCase = zero_vector(self.__height )
for i in range(self.__height ):
__lowerCamelCase = [
self.__matrix[i][j] * other.component(SCREAMING_SNAKE_CASE__ )
for j in range(self.__width )
]
ans.change_component(SCREAMING_SNAKE_CASE__ , sum(SCREAMING_SNAKE_CASE__ ) )
return ans
else:
raise Exception(
'''vector must have the same size as the '''
'''number of columns of the matrix!''' )
elif isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ): # matrix-scalar
__lowerCamelCase = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(SCREAMING_SNAKE_CASE__ , self.__width , self.__height )
return None
def __A ( self : Union[str, Any] ) -> int:
return self.__height
def __A ( self : List[str] ) -> int:
return self.__width
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('''change_component: indices out of bounds''' )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__lowerCamelCase = value
else:
raise Exception('''change_component: indices out of bounds''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> float:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
__lowerCamelCase = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__lowerCamelCase = minor[i][:y] + minor[i][y + 1 :]
return Matrix(SCREAMING_SNAKE_CASE__ , self.__width - 1 , self.__height - 1 ).determinant()
def __A ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> float:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
raise Exception('''Indices out of bounds''' )
def __A ( self : List[str] ) -> float:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if self.__height < 1:
raise Exception('''Matrix has no element''' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__lowerCamelCase = [
self.__matrix[0][y] * self.cofactor(0 , SCREAMING_SNAKE_CASE__ ) for y in range(self.__width )
]
return sum(SCREAMING_SNAKE_CASE__ )
def __magic_name__ ( __lowerCAmelCase : int ) -> Matrix:
__lowerCamelCase = [[0] * n for _ in range(__lowerCAmelCase )]
return Matrix(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> Matrix:
random.seed(__lowerCAmelCase )
__lowerCamelCase = [
[random.randint(__lowerCAmelCase , __lowerCAmelCase ) for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )
]
return Matrix(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
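# Illustrative usage (not in the original file); only class constructors and
# methods are exercised, since the module-level helper names are mangled here:
# v = Vector([1.0, 2.0, 2.0])
# print(len(v), v.euclidean_length())         # 3 3.0
# m = Matrix([[1.0, 2.0], [3.0, 4.0]], 2, 2)
# print(m.determinant())                      # -2.0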
| 298
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowercase )
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
requires_backends(self , '''vision''' )
requires_backends(self , '''torch''' )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
def __A ( self : str , **SCREAMING_SNAKE_CASE__ : str ) -> Any:
__lowerCamelCase = {}
__lowerCamelCase = {}
__lowerCamelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
__lowerCamelCase = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
__lowerCamelCase = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
__lowerCamelCase = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
__lowerCamelCase = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
__lowerCamelCase = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
__lowerCamelCase = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
__lowerCamelCase = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
__lowerCamelCase = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
__lowerCamelCase = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
__lowerCamelCase = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
__lowerCamelCase = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
__lowerCamelCase = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , *SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , **SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
return super().__call__(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , num_workers=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=64 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : float = 5_12 / 15_00 , SCREAMING_SNAKE_CASE__ : Optional[int] = 32 , SCREAMING_SNAKE_CASE__ : Optional[int] = 1 , ) -> List[str]:
__lowerCamelCase = load_image(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.image_processor.size['''longest_edge''']
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.image_processor.generate_crop_boxes(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
with self.device_placement():
if self.framework == "pt":
__lowerCamelCase = self.get_inference_context()
with inference_context():
__lowerCamelCase = self._ensure_tensor_on_device(SCREAMING_SNAKE_CASE__ , device=self.device )
__lowerCamelCase = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''' ) )
__lowerCamelCase = image_embeddings
__lowerCamelCase = grid_points.shape[1]
__lowerCamelCase = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
'''To return all points at once, set points_per_batch to None''' )
for i in range(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = grid_points[:, i : i + points_per_batch, :, :]
__lowerCamelCase = input_labels[:, i : i + points_per_batch]
__lowerCamelCase = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]=0.88 , SCREAMING_SNAKE_CASE__ : Tuple=0.95 , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Tuple=1 , ) -> Dict:
__lowerCamelCase = model_inputs.pop('''input_boxes''' )
__lowerCamelCase = model_inputs.pop('''is_last''' )
__lowerCamelCase = model_inputs.pop('''original_sizes''' ).tolist()
__lowerCamelCase = model_inputs.pop('''reshaped_input_sizes''' ).tolist()
__lowerCamelCase = self.model(**SCREAMING_SNAKE_CASE__ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
__lowerCamelCase = model_outputs['''pred_masks''']
__lowerCamelCase = self.image_processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , binarize=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model_outputs['''iou_scores''']
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=0.7 , ) -> Union[str, Any]:
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores''' ) )
all_masks.extend(model_output.pop('''masks''' ) )
all_boxes.append(model_output.pop('''boxes''' ) )
__lowerCamelCase = torch.cat(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.cat(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.image_processor.post_process_for_mask_generation(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = defaultdict(SCREAMING_SNAKE_CASE__ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(v )
__lowerCamelCase = {}
if output_rle_mask:
__lowerCamelCase = rle_mask
if output_bboxes_mask:
__lowerCamelCase = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 298
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=32 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=[10, 20, 30, 40] , UpperCamelCase_=[2, 2, 3, 2] , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=10 , UpperCamelCase_=0.02 , UpperCamelCase_=["stage2", "stage3", "stage4"] , UpperCamelCase_=[2, 3, 4] , UpperCamelCase_=None , ):
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : Tuple = image_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : List[str] = num_stages
UpperCAmelCase__ : Optional[int] = hidden_sizes
UpperCAmelCase__ : int = depths
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Optional[int] = use_labels
UpperCAmelCase__ : Union[str, Any] = intermediate_size
UpperCAmelCase__ : List[str] = hidden_act
UpperCAmelCase__ : int = num_labels
UpperCAmelCase__ : int = initializer_range
UpperCAmelCase__ : Optional[Any] = out_features
UpperCAmelCase__ : Tuple = out_indices
UpperCAmelCase__ : Dict = scope
def __snake_case ( self ):
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Tuple = None
if self.use_labels:
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = ConvNextModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase__ : int = model(UpperCamelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : str = ConvNextForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase__ : Tuple = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : List[str] = ConvNextBackbone(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase__ : Optional[int] = model(UpperCamelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Dict = ConvNextBackbone(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(UpperCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __snake_case ( self ):
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = config_and_inputs
UpperCAmelCase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
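# Not part of the original test file: a dependency-free sketch of the shape
# bookkeeping the tester above asserts on. ConvNext downsamples 4x in the
# patchify stem and 2x at each of the three later stages, so with the tester
# defaults (image_size=32, hidden_sizes=[10, 20, 30, 40]) the last stage sits
# at 32 // 32 = 1 spatial resolution. Illustrative values only.
def convnext_stage_shapes(image_size=32, hidden_sizes=(10, 20, 30, 40)):
    shapes = []
    resolution = image_size // 4  # stem: 4x4 non-overlapping patches
    for channels in hidden_sizes:
        shapes.append((channels, resolution, resolution))
        resolution //= 2  # each later stage halves the resolution
    return shapes

assert convnext_stage_shapes()[-1] == (40, 1, 1)  # hidden_sizes[-1], H // 32, W // 32
assert convnext_stage_shapes()[1] == (20, 4, 4)  # stage2, as checked in create_and_check_backbone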
@require_torch
class a ( lowercase , lowercase , unittest.TestCase ):
UpperCamelCase : Optional[Any] = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase : Optional[int] = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : str = True
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : Any = False
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : Optional[Any] = False
def __snake_case ( self ):
UpperCAmelCase__ : str = ConvNextModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def __snake_case ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self ):
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def __snake_case ( self ):
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def __snake_case ( self ):
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def __snake_case ( self ):
pass
def __snake_case ( self ):
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : str = model_class(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : List[str] = [*signature.parameters.keys()]
UpperCAmelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def __snake_case ( self ):
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __snake_case ( self ):
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase_ )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : List[Any] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCAmelCase__ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : str = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __snake_case ( self ):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@slow
def __snake_case ( self ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[Any] = ConvNextModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCamelCase ( ):
UpperCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
UpperCAmelCase__ : Optional[Any] = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase_ )
UpperCAmelCase__ : str = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : str = image_processor(images=UpperCamelCase_ , return_tensors='pt' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(**UpperCamelCase_ )
# verify the logits
UpperCAmelCase__ : Any = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
@require_torch
class a ( unittest.TestCase , lowercase ):
UpperCamelCase : str = (ConvNextBackbone,) if is_torch_available() else ()
UpperCamelCase : List[str] = ConvNextConfig
UpperCamelCase : Tuple = False
def __snake_case ( self ):
UpperCAmelCase__ : List[str] = ConvNextModelTester(self )
| 254
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class a :
UpperCamelCase : str = BlenderbotConfig
UpperCamelCase : int = {}
UpperCamelCase : Tuple = """gelu"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=20 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_=0 , ):
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : Dict = batch_size
UpperCAmelCase__ : Optional[Any] = seq_length
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : Optional[int] = use_labels
UpperCAmelCase__ : Optional[Any] = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : List[str] = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Union[str, Any] = intermediate_size
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : int = attention_probs_dropout_prob
UpperCAmelCase__ : Tuple = max_position_embeddings
UpperCAmelCase__ : Dict = eos_token_id
UpperCAmelCase__ : int = pad_token_id
UpperCAmelCase__ : Union[str, Any] = bos_token_id
def __snake_case ( self ):
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase__ : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase__ : List[Any] = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : Tuple = TFBlenderbotModel(config=UpperCamelCase_ ).get_decoder()
UpperCAmelCase__ : Any = inputs_dict['input_ids']
UpperCAmelCase__ : Optional[int] = input_ids[:1, :]
UpperCAmelCase__ : str = inputs_dict['attention_mask'][:1, :]
UpperCAmelCase__ : str = inputs_dict['head_mask']
UpperCAmelCase__ : List[Any] = 1
# first forward pass
UpperCAmelCase__ : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
UpperCAmelCase__ , UpperCAmelCase__ : int = outputs.to_tuple()
# create a hypothetical next token and extend next_input_ids with it
UpperCAmelCase__ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append the new tokens to input_ids and the new mask to attention_mask
UpperCAmelCase__ : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase__ : Any = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase__ : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
UpperCAmelCase__ : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase__ : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase__ : Tuple = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-3 )
def lowerCamelCase ( _snake_case ,_snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,):
if attention_mask is None:
UpperCAmelCase__ : Tuple = tf.cast(tf.math.not_equal(_snake_case ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase__ : List[str] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
UpperCAmelCase__ : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase__ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
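# Not in the original file: a tiny, framework-free illustration of the masking
# rule implemented above. The attention mask is 1 wherever a token differs from
# pad_token_id and 0 elsewhere; the decoder mask additionally forces position 0
# to 1 so the start token is always attended to. Toy values only.
def toy_attention_mask(input_ids, pad_token_id=0):
    return [[int(tok != pad_token_id) for tok in row] for row in input_ids]

assert toy_attention_mask([[5, 7, 0, 0], [3, 0, 0, 0]]) == [[1, 1, 0, 0], [1, 0, 0, 0]]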
@require_tf
class a ( lowercase , lowercase , unittest.TestCase ):
UpperCamelCase : int = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
UpperCamelCase : Dict = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase : Union[str, Any] = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase : Optional[int] = True
UpperCamelCase : List[Any] = False
UpperCamelCase : Union[str, Any] = False
def __snake_case ( self ):
UpperCAmelCase__ : List[Any] = TFBlenderbotModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=UpperCamelCase_ )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ )
@require_tokenizers
@require_tf
class a ( unittest.TestCase ):
UpperCamelCase : List[Any] = ["""My friends are cool but they eat too many carbs."""]
UpperCamelCase : List[str] = """facebook/blenderbot-400M-distill"""
@cached_property
def __snake_case ( self ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def __snake_case ( self ):
UpperCAmelCase__ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __snake_case ( self ):
UpperCAmelCase__ : int = self.tokenizer(self.src_text , return_tensors='tf' )
UpperCAmelCase__ : int = self.model.generate(
model_inputs.input_ids , )
UpperCAmelCase__ : str = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase_ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 254
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ['''PoolFormerFeatureExtractor''']
snake_case = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
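# Not part of the module above: a minimal sketch of the lazy-import pattern it
# relies on. transformers' _LazyModule keeps only the import-structure dict in
# memory and performs the real submodule import on first attribute access; the
# toy version below drops the caching and error handling of the real class.
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_module[attr]  # KeyError for unknown names
        module = importlib.import_module("." + submodule, self.__name__)
        return getattr(module, attr)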
| 309
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE ( __a ):
"""simple docstring"""
__A = ["image_processor", "tokenizer"]
__A = "FlavaImageProcessor"
__A = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Dict , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : int ):
"""simple docstring"""
_lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __lowerCAmelCase , )
_lowerCAmelCase = kwargs.pop('feature_extractor' )
_lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = self.image_processor
def __call__( self : Union[str, Any] , __lowerCAmelCase : Optional[ImageInput] = None , __lowerCAmelCase : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Union[bool, str, TruncationStrategy] = False , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be None.' )
if text is not None:
_lowerCAmelCase = self.tokenizer(
text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
if images is not None:
_lowerCAmelCase = self.image_processor(
__lowerCAmelCase , return_image_mask=__lowerCAmelCase , return_codebook_pixels=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
if text is not None and images is not None:
encoding.update(__lowerCAmelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def a ( self : Any , *__lowerCAmelCase : str , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def a ( self : List[str] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def a ( self : List[str] ):
"""simple docstring"""
_lowerCAmelCase = self.tokenizer.model_input_names
_lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def a ( self : Optional[int] ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __lowerCAmelCase , )
return self.image_processor_class
@property
def a ( self : Any ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __lowerCAmelCase , )
return self.image_processor
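# Not in the original file: a brief usage sketch (assumes `transformers`,
# `Pillow`, and network access to download the "facebook/flava-full"
# checkpoint). The processor routes text to the BERT tokenizer and images to
# the FLAVA image processor, then merges both outputs into one encoding.
if __name__ == "__main__":
    from PIL import Image
    from transformers import FlavaProcessor

    processor = FlavaProcessor.from_pretrained("facebook/flava-full")
    image = Image.new("RGB", (224, 224))  # stand-in image; any PIL image works
    inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")
    print(sorted(inputs.keys()))  # input_ids, attention_mask, pixel_values, ...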
| 309
| 1
|
"""simple docstring"""
import math
def lowerCamelCase ( _UpperCamelCase : int = 1_0_0 ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = sum(i * i for i in range(1 , n + 1 ) )
__UpperCAmelCase : Tuple = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
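# Not in the original file: the same quantity in closed form, handy as a
# sanity check. sum(i**2) for i in 1..n is n(n+1)(2n+1)/6 and (sum(i))**2 is
# (n(n+1)/2)**2, so no loop is needed.
def solution_closed_form(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares

assert solution_closed_form(10) == 2_640  # agrees with the brute-force version above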
if __name__ == "__main__":
print(F"{solution() = }")
| 299
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : str , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Optional[Any]=[30, 30] , UpperCamelCase : Dict=2 , UpperCamelCase : Dict=3 , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Dict=True , UpperCamelCase : Tuple=32 , UpperCamelCase : List[Any]=5 , UpperCamelCase : List[Any]=4 , UpperCamelCase : Any=37 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : str=0.1 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : List[Any]=3 , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[str]=8 , UpperCamelCase : Any=10 , ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Optional[Any] = patch_size
__UpperCAmelCase : Union[str, Any] = num_channels
__UpperCAmelCase : List[str] = is_training
__UpperCAmelCase : int = use_labels
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Tuple = num_attention_heads
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Dict = type_sequence_label_size
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : str = num_labels
__UpperCAmelCase : Dict = scope
__UpperCAmelCase : str = n_targets
__UpperCAmelCase : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__UpperCAmelCase : Optional[int] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__UpperCAmelCase : List[Any] = num_patches + 1 + self.num_detection_tokens
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__UpperCAmelCase : Tuple = []
for i in range(self.batch_size ):
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Optional[int] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase )
__UpperCAmelCase : Tuple = torch.rand(self.n_targets , 4 , device=UpperCamelCase )
labels.append(UpperCamelCase )
__UpperCAmelCase : str = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = YolosModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : str = model(UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = YolosForObjectDetection(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : str = model(pixel_values=UpperCamelCase )
__UpperCAmelCase : str = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
__UpperCAmelCase : Any = model(pixel_values=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
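# Not part of the original test: a dependency-free check of the sequence-length
# arithmetic used above. YOLOS flattens the image into patches, then prepends a
# [CLS] token and appends learnable detection tokens, so
# seq_len = (H // patch) * (W // patch) + 1 + num_detection_tokens.
def yolos_seq_len(image_size=(30, 30), patch_size=2, num_detection_tokens=10):
    num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
    return num_patches + 1 + num_detection_tokens

assert yolos_seq_len() == 15 * 15 + 1 + 10  # 236, the tester's expected_seq_len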
@require_torch
class lowerCamelCase__ ( A , A , unittest.TestCase ):
"""simple docstring"""
__a = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
__a = (
{"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
)
__a = False
__a = False
__a = False
__a = False
def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : int=False ):
'''simple docstring'''
__UpperCAmelCase : int = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__UpperCAmelCase : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Optional[int] = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase , dtype=torch.long )
__UpperCAmelCase : Union[str, Any] = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase , dtype=torch.float )
labels.append(UpperCamelCase )
__UpperCAmelCase : List[Any] = labels
return inputs_dict
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : List[str] = YolosModelTester(self )
__UpperCAmelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[Any] = model_class(UpperCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : int = [*signature.parameters.keys()]
__UpperCAmelCase : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = True
# in YOLOS, the seq_len is different
__UpperCAmelCase : List[str] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : Any = False
__UpperCAmelCase : Any = True
__UpperCAmelCase : List[str] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[str] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
__UpperCAmelCase : str = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : Dict = True
__UpperCAmelCase : str = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
__UpperCAmelCase : Union[str, Any] = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__UpperCAmelCase : Optional[int] = len(UpperCamelCase )
# Check attention is always last and order is fine
__UpperCAmelCase : Any = True
__UpperCAmelCase : Dict = True
__UpperCAmelCase : str = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Dict = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
__UpperCAmelCase : int = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase ) )
__UpperCAmelCase : Union[str, Any] = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
def check_hidden_states_output(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : int ):
__UpperCAmelCase : str = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
__UpperCAmelCase : str = outputs.hidden_states
__UpperCAmelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# YOLOS has a different seq_length
__UpperCAmelCase : int = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase )
@slow
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def lowerCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : int = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(UpperCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Any = prepare_img()
__UpperCAmelCase : Union[str, Any] = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(inputs.pixel_values )
# verify outputs
__UpperCAmelCase : Any = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
__UpperCAmelCase : List[str] = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=UpperCamelCase , )
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
# verify postprocessing
__UpperCAmelCase : List[str] = image_processor.post_process_object_detection(
UpperCamelCase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
__UpperCAmelCase : Optional[Any] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(UpperCamelCase )
__UpperCAmelCase : Any = [75, 75, 17, 63, 17]
__UpperCAmelCase : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(UpperCamelCase )
self.assertEqual(len(results["""scores"""] ) , 5 )
self.assertTrue(torch.allclose(results["""scores"""] , UpperCamelCase , atol=1e-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() , UpperCamelCase )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] , UpperCamelCase ) )
| 299
| 1
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 278
|
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False ) -> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(__lowerCAmelCase ), magnitude * sin(__lowerCAmelCase )]
return [magnitude * cos(radians(__lowerCAmelCase ) ), magnitude * sin(radians(__lowerCAmelCase ) )]
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 10**-1 ) -> bool:
'''simple docstring'''
lowerCamelCase__ =cross(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ =sum(__lowerCAmelCase )
return abs(__lowerCAmelCase ) < eps
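# Not in the original file: a minimal extra example making explicit what
# in_static_equilibrium tests. It sums the 2D cross products force x position
# (the net moment about the origin) and compares that single scalar to zero;
# it does not separately verify that the net force vanishes.
if __name__ == "__main__":
    unbalanced_force = array([[0.0, 10.0]])  # a single nonzero force...
    at_origin = array([[0.0, 0.0]])          # ...acting at the pivot itself
    assert in_static_equilibrium(unbalanced_force, at_origin)  # passes: zero net moment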
if __name__ == "__main__":
# Test to check if it works
a =array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
a =array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
a =array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
a =array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
a =array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]])
a =array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 530
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
_snake_case: Optional[Any] = inspect.getfile(accelerate.test_utils )
_snake_case: str = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_snake_case: Optional[int] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_snake_case: Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
_snake_case: Tuple = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
_snake_case: Any = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case: Optional[int] = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
_snake_case: Dict = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
if __name__ == "__main__":
A : Dict = Accelerator()
A : List[str] = (accelerator.state.process_index + 2, 10)
A : List[Any] = torch.randint(0, 10, shape).to(accelerator.device)
A : List[str] = ''
A : Union[str, Any] = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
A : Optional[Any] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
A : str = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
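# Not part of the original test: a single-process, pure-torch illustration of
# the semantics checked above. pad_across_processes pads dim 0 of each rank's
# tensor up to the maximum length across ranks, filling with 0 either at the
# end (default) or at the front (pad_first=True).
import torch

def toy_pad(tensor: torch.Tensor, max_len: int, pad_first: bool = False) -> torch.Tensor:
    pad = torch.zeros(max_len - tensor.shape[0], *tensor.shape[1:], dtype=tensor.dtype)
    return torch.cat([pad, tensor] if pad_first else [tensor, pad], dim=0)

t = torch.ones(3, 2, dtype=torch.long)
assert toy_pad(t, 5).shape == (5, 2)
assert torch.all(toy_pad(t, 5)[3:] == 0)                   # trailing padding
assert torch.all(toy_pad(t, 5, pad_first=True)[:2] == 0)   # leading padding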
| 273
|
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A : Tuple = '▁'
A : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( __UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = BigBirdTokenizer
_SCREAMING_SNAKE_CASE = BigBirdTokenizerFast
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
super().setUp()
_snake_case: Dict = self.tokenizer_class(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case: List[str] = '<s>'
_snake_case: Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
_snake_case: Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '[MASK]' )
self.assertEqual(len(__snake_case ) , 10_04 )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case: Optional[int] = self.get_tokenizer()
_snake_case: Union[str, Any] = self.get_rust_tokenizer()
_snake_case: List[str] = 'I was born in 92000, and this is falsé.'
_snake_case: str = tokenizer.tokenize(__snake_case )
_snake_case: Dict = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
_snake_case: Union[str, Any] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
_snake_case: List[Any] = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
_snake_case: Optional[int] = self.get_rust_tokenizer()
_snake_case: int = tokenizer.encode(__snake_case )
_snake_case: Tuple = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case: Tuple = BigBirdTokenizer(__snake_case , keep_accents=__snake_case )
_snake_case: Optional[int] = tokenizer.tokenize('This is a test' )
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [2_85, 46, 10, 1_70, 3_82] , )
_snake_case: Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_snake_case: List[Any] = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_snake_case: Optional[Any] = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
_snake_case: Dict = 'Hello World!'
_snake_case: Optional[int] = [65, 1_85_36, 22_60, 1_01, 66]
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case: str = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
_snake_case: str = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
_snake_case: Tuple = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case: Union[str, Any] = ' '.join(__snake_case )
_snake_case: Optional[Any] = self.big_tokenizer.encode_plus(__snake_case , return_tensors='pt' , return_token_type_ids=__snake_case )
_snake_case: int = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=__snake_case )
_snake_case: int = BigBirdConfig(attention_type='original_full' )
_snake_case: int = BigBirdModel(__snake_case )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__snake_case )
model(**__snake_case )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
_snake_case: Tuple = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
_snake_case: Optional[Any] = tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids )
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
_snake_case: Dict = {'input_ids': [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
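# Not part of the original test: a tiny illustration of the framing checked in
# the decode test above. Judging by the expected encodings, BigBird wraps every
# sequence with bos/eos special ids (65 appears to map to [CLS] and 66 to
# [SEP] in the pretrained vocab), so each row of the integration dict starts
# with 65 and ends with 66 before any padding.
expected_hello_world = [65, 18_536, 2_260, 101, 66]  # "Hello World!" ids from the test above
assert expected_hello_world[0] == 65 and expected_hello_world[-1] == 66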
| 273
| 1
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = ['''model.decoder.embed_positions.weights''']
def UpperCAmelCase__ ( lowerCamelCase_ : Tuple ):
if "emb" in name:
__a : Any = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
__a : str = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
__a : List[Any] = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
__a : List[Any] = name.replace('linear1' , 'fc1' )
if "linear2" in name:
__a : List[str] = name.replace('linear2' , 'fc2' )
if "norm1" in name:
__a : List[str] = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
__a : List[Any] = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
__a : str = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
__a : int = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
__a : Any = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
__a : List[Any] = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def UpperCAmelCase__ ( lowerCamelCase_ : OrderedDict , lowerCamelCase_ : int ):
__a : Union[str, Any] = list(state_dict.keys() )
__a : Optional[int] = {}
for key in keys:
__a : Optional[int] = state_dict.pop(lowerCamelCase_ )
__a : List[Any] = rename_keys(lowerCamelCase_ )
if "in_proj_weight" in key:
# split the fused qkv projection into separate q, k and v weights
__a : Optional[Any] = val[:hidden_size, :]
__a : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
__a : str = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__a : List[str] = val
else:
__a : Any = val
return state_dict, enc_dec_proj_state_dict
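# Not in the original script: a small sketch of the fused-QKV split performed
# in rename_state_dict above. Audiocraft stores query/key/value as one
# in_proj_weight of shape (3 * hidden_size, hidden_size); the converter slices
# it into three (hidden_size, hidden_size) matrices. Shapes here are toy-sized.
import torch

_hidden_size = 4
_fused = torch.arange(3 * _hidden_size * _hidden_size, dtype=torch.float32).reshape(
    3 * _hidden_size, _hidden_size
)
_q_proj = _fused[:_hidden_size, :]
_k_proj = _fused[_hidden_size : 2 * _hidden_size, :]
_v_proj = _fused[-_hidden_size:, :]
assert _q_proj.shape == _k_proj.shape == _v_proj.shape == (_hidden_size, _hidden_size)
assert torch.equal(torch.cat([_q_proj, _k_proj, _v_proj], dim=0), _fused)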
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
if checkpoint == "small":
# default config values
__a : Union[str, Any] = 1_0_2_4
__a : Any = 2_4
__a : Tuple = 1_6
elif checkpoint == "medium":
__a : Dict = 1_5_3_6
__a : Dict = 4_8
__a : Union[str, Any] = 2_4
elif checkpoint == "large":
__a : int = 2_0_4_8
__a : Dict = 4_8
__a : Union[str, Any] = 3_2
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
__a : str = MusicgenDecoderConfig(
hidden_size=lowerCamelCase_ , ffn_dim=hidden_size * 4 , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , )
return config
@torch.no_grad()
def UpperCAmelCase__ ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : int=None , lowerCamelCase_ : List[Any]="cpu" ):
__a : str = MusicGen.get_pretrained(lowerCamelCase_ , device=lowerCamelCase_ )
__a : str = decoder_config_from_checkpoint(lowerCamelCase_ )
__a : Tuple = fairseq_model.lm.state_dict()
__a , __a : int = rename_state_dict(
lowerCamelCase_ , hidden_size=decoder_config.hidden_size )
__a : int = TaEncoderModel.from_pretrained('t5-base' )
__a : List[Any] = EncodecModel.from_pretrained('facebook/encodec_32khz' )
__a : Tuple = MusicgenForCausalLM(lowerCamelCase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__a , __a : Optional[int] = decoder.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowerCamelCase_ ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
__a : Union[str, Any] = MusicgenForConditionalGeneration(text_encoder=lowerCamelCase_ , audio_encoder=lowerCamelCase_ , decoder=lowerCamelCase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowerCamelCase_ )
# check we can do a forward pass
__a : Any = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__a : Tuple = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__a : Any = model(input_ids=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ ).logits
if logits.shape != (8, 1, 2_0_4_8):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
__a : Any = AutoTokenizer.from_pretrained('t5-base' )
__a : Optional[Any] = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
__a : Union[str, Any] = MusicgenProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
# set the appropriate bos/pad token ids
__a : Tuple = 2_0_4_8
__a : int = 2_0_4_8
# set other default generation config params
__a : Union[str, Any] = int(3_0 * audio_encoder.config.frame_rate )
__a : List[Any] = True
__a : Any = 3.0
if pytorch_dump_folder is not None:
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowerCamelCase_ )
processor.push_to_hub(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update the element at index i to a new value, in O(log n)."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Combine the values on the inclusive range [i, j] with self.fn."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield every node of the tree in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
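# A minimal sanity check of the SegmentTree above (a sketch; any associative
# two-argument function works as `fn`). Building is O(n), while update and
# query_range are O(log n) since each walks one root-to-leaf path.
tree = SegmentTree([5, 8, 6, 3], max)
assert tree.query_range(0, 3) == 8
tree.update(1, 1)
assert tree.query_range(0, 3) == 6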
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Any = PegasusTokenizer
A : Optional[Any] = PegasusTokenizerFast
A : Tuple = True
A : Any = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE : List[Any] = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return ("This is a test", "This is a test")
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = '</s>'
SCREAMING_SNAKE_CASE : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ), A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '<pad>' )
self.assertEqual(vocab_keys[1], '</s>' )
self.assertEqual(vocab_keys[-1], 'v' )
self.assertEqual(len(A ), 1_103 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_103 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Tuple = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE : List[Any] = rust_tokenizer([raw_input_str], return_tensors=A, add_special_tokens=A ).input_ids[0]
SCREAMING_SNAKE_CASE : Tuple = py_tokenizer([raw_input_str], return_tensors=A, add_special_tokens=A ).input_ids[0]
self.assertListEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
SCREAMING_SNAKE_CASE : int = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
SCREAMING_SNAKE_CASE : Union[str, Any] = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
SCREAMING_SNAKE_CASE : List[Any] = tokenizer([raw_input_str], return_tensors=A ).input_ids[0]
self.assertListEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
SCREAMING_SNAKE_CASE : Optional[Any] = 'To ensure a smooth flow of bank resolutions.'
SCREAMING_SNAKE_CASE : int = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
SCREAMING_SNAKE_CASE : str = tokenizer([raw_input_str], return_tensors=A ).input_ids[0]
self.assertListEqual(A, A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ['This is going to be way too long.' * 150, 'short example']
SCREAMING_SNAKE_CASE : List[str] = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE : List[Any] = self._large_tokenizer(A, padding=A, truncation=A, return_tensors='pt' )
SCREAMING_SNAKE_CASE : Dict = self._large_tokenizer(
text_target=A, max_length=5, padding=A, truncation=A, return_tensors='pt' )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = {'input_ids': [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=A, model_name='google/bigbird-pegasus-large-arxiv', revision='ba85d0851d708441f91440d509690f1ab6353415', )
@require_sentencepiece
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : List[Any] = PegasusTokenizer
A : Any = PegasusTokenizerFast
A : Any = True
A : str = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE : Any = PegasusTokenizer(A, offset=0, mask_token_sent=A, mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return ("This is a test", "This is a test")
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Any = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE : str = rust_tokenizer([raw_input_str], return_tensors=A, add_special_tokens=A ).input_ids[0]
SCREAMING_SNAKE_CASE : List[str] = py_tokenizer([raw_input_str], return_tensors=A, add_special_tokens=A ).input_ids[0]
self.assertListEqual(A, A )
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ['This is going to be way too long.' * 1_000, 'short example']
SCREAMING_SNAKE_CASE : Optional[int] = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE : List[str] = self._large_tokenizer(A, padding=A, truncation=A, return_tensors='pt' )
SCREAMING_SNAKE_CASE : Optional[Any] = self._large_tokenizer(
text_target=A, max_length=5, padding=A, truncation=A, return_tensors='pt' )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
SCREAMING_SNAKE_CASE : Union[str, Any] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A, [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1], )
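# The `offset` asserted in the tests above is how Pegasus reconciles its
# SentencePiece vocabulary with the reserved special tokens: raw piece ids are
# shifted up by `offset` so the low ids stay free for <pad>, </s> and the
# <mask_*>/<unk_*> tokens. A sketch of the mapping (assuming a tokenizer
# instance `tok`):
# hf_id = tok.sp_model.piece_to_id(piece) + tok.offset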
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) )
class _a :
'''simple docstring'''
def __init__( self, A, A=13, A=64, A=3, A=3, A=2, A=1, A=16, A=[128, 256, 384], A=[4, 6, 8], A=[2, 3, 4], A=[16, 16, 16], A=0, A=[2, 2, 2], A=[2, 2, 2], A=0.02, A=True, A=True, A=2, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : List[Any] = image_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Tuple = kernel_size
SCREAMING_SNAKE_CASE : Tuple = stride
SCREAMING_SNAKE_CASE : Union[str, Any] = padding
SCREAMING_SNAKE_CASE : int = hidden_sizes
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = depths
SCREAMING_SNAKE_CASE : int = key_dim
SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE : int = patch_size
SCREAMING_SNAKE_CASE : Tuple = attention_ratio
SCREAMING_SNAKE_CASE : Tuple = mlp_ratio
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : List[str] = use_labels
SCREAMING_SNAKE_CASE : List[Any] = num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size], self.num_labels )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, )
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = LevitModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(A )
SCREAMING_SNAKE_CASE : Tuple = (self.image_size, self.image_size)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE : int = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
SCREAMING_SNAKE_CASE : int = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]), )
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = LevitForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = config_and_inputs
SCREAMING_SNAKE_CASE : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Any = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
A : Tuple = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A : Any = False
A : Union[str, Any] = False
A : int = False
A : int = False
A : int = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = LevitModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self, config_class=A, has_text_modality=A, hidden_size=37 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self ):
'''simple docstring'''
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(A )
SCREAMING_SNAKE_CASE : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A, A, A ):
SCREAMING_SNAKE_CASE : Tuple = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(A, A ) )
SCREAMING_SNAKE_CASE : List[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = len(self.model_tester.depths ) + 1
self.assertEqual(len(A ), A )
SCREAMING_SNAKE_CASE : Optional[Any] = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE : Optional[int] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [
height * width,
self.model_tester.hidden_sizes[0],
], )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = True
check_hidden_states_output(A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(A, A, A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self, A, A, A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = super()._prepare_for_class(A, A, return_labels=A )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(A )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
model.to(A )
model.train()
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(A, A, return_labels=A )
SCREAMING_SNAKE_CASE : Any = model(**A ).loss
loss.backward()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(A ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE : List[str] = model_class(A )
model.gradient_checkpointing_enable()
model.to(A )
model.train()
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(A, A, return_labels=A )
SCREAMING_SNAKE_CASE : int = model(**A ).loss
loss.backward()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Dict = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(A ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
SCREAMING_SNAKE_CASE : str = problem_type['title']
SCREAMING_SNAKE_CASE : int = problem_type['num_labels']
SCREAMING_SNAKE_CASE : Any = model_class(A )
model.to(A )
model.train()
SCREAMING_SNAKE_CASE : Optional[Any] = self._prepare_for_class(A, A, return_labels=A )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE : Optional[int] = inputs['labels'].unsqueeze(1 ).repeat(1, problem_type['num_labels'] )
SCREAMING_SNAKE_CASE : Optional[Any] = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=A ) as warning_list:
SCREAMING_SNAKE_CASE : str = model(**A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : int = LevitModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A )
SCREAMING_SNAKE_CASE : List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**A )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, A )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
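# The spatial sizes recomputed throughout the Levit tests follow the standard
# convolution output formula; a small helper capturing it (hypothetical, for
# illustration only):
from math import floor

def conv_out_size(size: int, kernel: int, stride: int, padding: int) -> int:
    # floor((size + 2 * padding - kernel) / stride) + 1
    return floor((size + 2 * padding - kernel) / stride) + 1

# e.g. four kernel-3, stride-2, padding-1 convolutions shrink a 64-pixel input
# through 64 -> 32 -> 16 -> 8 -> 4, which is what the hidden-state checks expect.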
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
snake_case = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( __a ):
"""simple docstring"""
__A = "AutoTokenizer"
__A = ["tokenizer"]
__A = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
    def __init__(self, tokenizer, speaker_embeddings=None):
        """simple docstring"""
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        """simple docstring"""
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist; no preloaded speaker embeddings will be used. Make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(self, save_directory, speaker_embeddings_dict_path="speaker_embeddings_path.json", speaker_embeddings_directory="speaker_embeddings", push_to_hub: bool = False, **kwargs):
        """simple docstring"""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)
            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)
        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        """simple docstring"""
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )
            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exist; no preloaded voice preset will be used. Make sure to provide correct paths to the {voice_preset} embeddings."
                )
            voice_preset_dict[key] = np.load(path)
        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None, **kwargs):
        """simple docstring"""
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")
            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        """simple docstring"""
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)
        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )
        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset
        return encoded_text
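# Hypothetical usage of the processor above (the class alias, model id and
# preset name are illustrative, not taken from this file):
# processor = BarkProcessor.from_pretrained("suno/bark-small")
# inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")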
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class SCREAMING_SNAKE_CASE ( __a ):
"""simple docstring"""
__A = ""
__A = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
__A = None # compression type in fsspec. ex: "gzip"
    __A = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        """simple docstring"""
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class SCREAMING_SNAKE_CASE ( __a ):
"""simple docstring"""
__A = "bz2"
__A = "bz2"
__A = ".bz2"
class SCREAMING_SNAKE_CASE ( __a ):
"""simple docstring"""
__A = "gzip"
__A = "gzip"
__A = ".gz"
class SCREAMING_SNAKE_CASE ( __a ):
"""simple docstring"""
__A = "lz4"
__A = "lz4"
__A = ".lz4"
class SCREAMING_SNAKE_CASE ( __a ):
"""simple docstring"""
__A = "xz"
__A = "xz"
__A = ".xz"
class SCREAMING_SNAKE_CASE ( __a ):
"""simple docstring"""
__A = "zstd"
__A = "zstd"
__A = ".zst"
    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Wrap the decompression stream so fsspec can attach close() to it."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
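# A minimal usage sketch for the filesystems above (assumes ./data.txt.gz
# exists locally and the gzip subclass is instantiated directly; in practice
# these classes are registered with fsspec so they can also be reached as URL
# protocols such as "gzip://"):
# fs = GzipFileSystem(fo="./data.txt.gz")  # name illustrative for the gzip class above
# data = fs.open("data.txt", mode="rb").read()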
def counting_sort(collection):
    """Sort a collection of integers in O(n + k) time, where k is the value range."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    """Sort the characters of a string by delegating to counting_sort."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(counting_sort(unsorted))
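# counting_sort is stable and runs in O(n + k) time and space, where
# k = max(collection) - min(collection) + 1; a quick check:
assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert counting_sort([-3, 1, -2]) == [-3, -2, 1]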
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """simple docstring"""

    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: KarrasVeScheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
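# A usage sketch for the pipeline above (the checkpoint id is illustrative,
# not taken from this file):
# pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
# image = pipe(batch_size=1, num_inference_steps=50).images[0]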
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
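# Example invocation (a sketch; the script filename and all paths are
# placeholders):
# python convert_xlnet_checkpoint.py \
#     --tf_checkpoint_path ./xlnet_model.ckpt \
#     --xlnet_config_file ./xlnet_config.json \
#     --pytorch_dump_folder_path ./xlnet-out \
#     --finetuning_task sst-2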
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow's C++ log output before it is imported below
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
"""simple docstring"""
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
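# Bucket sort averages O(n + k) time for k buckets when values are spread
# evenly; the per-bucket sorted() calls bound the worst case at O(n log n).
assert bucket_sort([3, 3, 1, 2]) == [1, 2, 3, 3]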
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case = '▁'
snake_case = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class UpperCamelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : int = BertGenerationTokenizer
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : str = True
def A ( self ) -> int:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE = BertGenerationTokenizer(lowercase__ , keep_accents=lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = '<s>'
SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def A ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(lowercase__ ) , 1002 )
def A ( self ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def A ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = BertGenerationTokenizer(lowercase__ , keep_accents=lowercase__ )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase__ ) , [285, 46, 10, 170, 382] , )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(
lowercase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def A ( self ) -> Any:
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def A ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'Hello World!'
SCREAMING_SNAKE_CASE = [18536, 2260, 101]
self.assertListEqual(lowercase__ , self.big_tokenizer.encode(lowercase__ ) )
@slow
def A ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
SCREAMING_SNAKE_CASE = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(lowercase__ , self.big_tokenizer.encode(lowercase__ ) )
@require_torch
@slow
def A ( self ) -> int:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
SCREAMING_SNAKE_CASE = list(self.big_tokenizer.get_vocab().keys() )[:10]
SCREAMING_SNAKE_CASE = ' '.join(lowercase__ )
SCREAMING_SNAKE_CASE = self.big_tokenizer.encode_plus(lowercase__ , return_tensors='pt' , return_token_type_ids=lowercase__ )
SCREAMING_SNAKE_CASE = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=lowercase__ )
SCREAMING_SNAKE_CASE = BertGenerationConfig()
SCREAMING_SNAKE_CASE = BertGenerationEncoder(lowercase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase__ )
model(**lowercase__ )
@slow
def A ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
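# The "▁" (U+2581) prefix asserted in the tokenization tests above is
# SentencePiece's word-boundary marker: encoding replaces leading spaces with
# "▁", and decoding joins the pieces and maps "▁" back to spaces, so
# ['▁This', '▁is', '▁a', '▁t', 'est'] round-trips to "This is a test".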
"""simple docstring"""
lowercase__ = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
lowercase__ = frozenset(["""prompt""", """negative_prompt"""])
lowercase__ = frozenset([])
lowercase__ = frozenset(["""image"""])
lowercase__ = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowercase__ = frozenset(["""image"""])
lowercase__ = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
lowercase__ = frozenset(["""prompt""", """image""", """negative_prompt"""])
lowercase__ = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
lowercase__ = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
lowercase__ = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowercase__ = frozenset(["""image""", """mask_image"""])
lowercase__ = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowercase__ = frozenset(["""example_image""", """image""", """mask_image"""])
lowercase__ = frozenset(["""class_labels"""])
lowercase__ = frozenset(["""class_labels"""])
lowercase__ = frozenset(["""batch_size"""])
lowercase__ = frozenset([])
lowercase__ = frozenset(["""batch_size"""])
lowercase__ = frozenset([])
lowercase__ = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
lowercase__ = frozenset(["""prompt""", """negative_prompt"""])
lowercase__ = frozenset(["""input_tokens"""])
lowercase__ = frozenset(["""input_tokens"""])
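# Editorial note (not part of the original file): these look like the diffusers
# pipeline-test parameter groups -- in each pair, the first frozenset lists the full
# call-signature parameters a pipeline test exercises and the second lists the ones
# that can be batched. frozenset keeps each group immutable and hashable, so subset
# checks stay cheap, e.g.:
#     assert frozenset(["prompt"]) <= frozenset(["prompt", "negative_prompt"])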
| 630
|
'''simple docstring'''
from manim import *
class a__ ( UpperCAmelCase__ ):
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = Rectangle(height=0.5 , width=0.5 )
__lowerCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowerCamelCase = Rectangle(height=0.25 , width=0.25 )
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = VGroup(*a ).arrange(a , buff=0 )
__lowerCamelCase = VGroup(*a ).arrange(a , buff=0 )
__lowerCamelCase = VGroup(a , a ).arrange(a , buff=0 )
__lowerCamelCase = Text('''CPU''' , font_size=24 )
__lowerCamelCase = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a )
__lowerCamelCase = [mem.copy() for i in range(4 )]
__lowerCamelCase = VGroup(*a ).arrange(a , buff=0 )
__lowerCamelCase = Text('''GPU''' , font_size=24 )
__lowerCamelCase = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
gpu.move_to([-1, -1, 0] )
self.add(a )
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = VGroup(*a ).arrange(a , buff=0 )
__lowerCamelCase = Text('''Model''' , font_size=24 )
__lowerCamelCase = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
model.move_to([3, -1.0, 0] )
self.add(a )
__lowerCamelCase = []
__lowerCamelCase = []
for i, rect in enumerate(a ):
__lowerCamelCase = fill.copy().set_fill(a , opacity=0.8 )
target.move_to(a )
model_arr.append(a )
__lowerCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(a , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(a )
self.add(*a , *a )
__lowerCamelCase = [meta_mem.copy() for i in range(6 )]
__lowerCamelCase = [meta_mem.copy() for i in range(6 )]
__lowerCamelCase = VGroup(*a ).arrange(a , buff=0 )
__lowerCamelCase = VGroup(*a ).arrange(a , buff=0 )
__lowerCamelCase = VGroup(a , a ).arrange(a , buff=0 )
__lowerCamelCase = Text('''Disk''' , font_size=24 )
__lowerCamelCase = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
disk.move_to([-4, -1.25, 0] )
self.add(a , a )
__lowerCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowerCamelCase = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a , a )
__lowerCamelCase = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a )
__lowerCamelCase = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a ) )
__lowerCamelCase = Square(0.3 )
input.set_fill(a , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , a , buff=0.5 )
self.play(Write(a ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=a , buff=0.02 )
self.play(MoveToTarget(a ) )
self.play(FadeOut(a ) )
__lowerCamelCase = Arrow(start=a , end=a , color=a , buff=0.5 )
a.next_to(model_arr[0].get_left() , a , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__lowerCamelCase = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a , run_time=3 ) )
__lowerCamelCase = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(a ) , Circumscribe(model_arr[0] , color=a , **a ) , Circumscribe(model_cpu_arr[0] , color=a , **a ) , Circumscribe(gpu_rect[0] , color=a , **a ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__lowerCamelCase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , a , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
__lowerCamelCase = AnimationGroup(
FadeOut(a , run_time=0.5 ) , MoveToTarget(a , run_time=0.5 ) , FadeIn(a , run_time=0.5 ) , lag_ratio=0.2 )
self.play(a )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__lowerCamelCase = 0.7
self.play(
Circumscribe(model_arr[i] , **a ) , Circumscribe(cpu_left_col_base[i] , **a ) , Circumscribe(cpu_left_col_base[i + 1] , color=a , **a ) , Circumscribe(gpu_rect[0] , color=a , **a ) , Circumscribe(model_arr[i + 1] , color=a , **a ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=a , **a ) , Circumscribe(cpu_left_col_base[-1] , color=a , **a ) , Circumscribe(gpu_rect[0] , color=a , **a ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__lowerCamelCase = a_c
__lowerCamelCase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(a ) , FadeOut(a , run_time=0.5 ) , )
__lowerCamelCase = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(a , run_time=3 ) , MoveToTarget(a ) )
self.wait()
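# Editorial rendering note (file name is a placeholder, not given by the source): a
# manim Scene subclass like the one above is typically rendered from the command line
# with
#     manim -pql <this_file>.py <SceneClassName>
# where -p previews the result and -ql selects low render quality for fast iteration.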
| 546
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _A ( unittest.TestCase ):
@property
def _a (self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def _a (self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def _a (self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(SCREAMING_SNAKE_CASE_ )
def _a (self ) -> int:
'''simple docstring'''
UpperCamelCase__ = self.dummy_uncond_unet
UpperCamelCase__ = DDIMScheduler()
UpperCamelCase__ = self.dummy_vq_model
UpperCamelCase__ = LDMPipeline(unet=SCREAMING_SNAKE_CASE_ , vqvae=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
ldm.to(SCREAMING_SNAKE_CASE_ )
ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = ldm(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type='''numpy''' ).images
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = ldm(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type='''numpy''' , return_dict=SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase__ = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _A ( unittest.TestCase ):
def _a (self ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(SCREAMING_SNAKE_CASE_ )
ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = ldm(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , output_type='''numpy''' ).images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase__ = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
UpperCamelCase__ = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
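# --- Editorial sketch (not part of the original tests) ---
# The seeded-generator idiom above is what makes the two ldm(...) calls comparable:
# re-seeding with the same value reproduces the same noise, so outputs can be checked
# slice-for-slice against hard-coded references.
if __name__ == "__main__":
    g_a = torch.Generator().manual_seed(0)
    g_b = torch.Generator().manual_seed(0)
    assert torch.equal(torch.randn(3, generator=g_a), torch.randn(3, generator=g_b))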
| 469
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ =logging.get_logger(__name__)
__magic_name__ ={
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _A ( PretrainedConfig ):
    model_type = "vit_mae"
    def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 469
| 1
|
"""simple docstring"""
from torch import nn
def SCREAMING_SNAKE_CASE__ ( act_fn : str ) -> nn.Module:
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'Unsupported activation function: {act_fn}' )
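# --- Editorial usage sketch (not part of the original file) ---
# Exercising the activation factory defined above; the tensor shape is arbitrary.
if __name__ == "__main__":
    import torch

    act = SCREAMING_SNAKE_CASE__("silu")
    assert isinstance(act, nn.SiLU)
    assert act(torch.randn(2, 4)).shape == (2, 4)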
| 438
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class A :
def __init__( self , SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
A : List[str] = parent
A : int = 13
A : Dict = 7
A : str = True
A : Dict = True
A : Tuple = True
A : Union[str, Any] = True
A : Dict = True
A : str = False
A : Union[str, Any] = False
A : Union[str, Any] = False
A : List[str] = 2
A : Optional[int] = 99
A : List[str] = 0
A : int = 32
A : Any = 2
A : Optional[Any] = 4
A : List[Any] = 0.1
A : List[str] = 0.1
A : Tuple = 512
A : Optional[Any] = 16
A : List[str] = 2
A : Tuple = 0.02
A : List[str] = 3
A : List[Any] = 4
A : Any = '''last'''
A : int = True
A : Union[str, Any] = None
A : Dict = 0
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
A : int = None
if self.use_input_lengths:
A : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A : Tuple = None
if self.use_token_type_ids:
A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A : List[Any] = None
A : List[Any] = None
A : Tuple = None
if self.use_labels:
A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A : List[Any] = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
A : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
A : Dict = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : int = TFFlaubertModel(config=SCREAMING_SNAKE_CASE )
A : Optional[int] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
A : int = model(SCREAMING_SNAKE_CASE )
A : List[str] = [input_ids, input_mask]
A : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : List[str] = TFFlaubertWithLMHeadModel(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
A : Tuple = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
A : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(SCREAMING_SNAKE_CASE )
A : List[str] = {'''input_ids''': input_ids, '''lengths''': input_lengths}
A : Tuple = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : Union[str, Any] = TFFlaubertForSequenceClassification(SCREAMING_SNAKE_CASE )
A : Dict = {'''input_ids''': input_ids, '''lengths''': input_lengths}
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
A : Optional[Any] = self.num_labels
A : str = TFFlaubertForTokenClassification(config=SCREAMING_SNAKE_CASE )
A : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : int = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : str = self.num_choices
A : int = TFFlaubertForMultipleChoice(config=SCREAMING_SNAKE_CASE )
A : str = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : Optional[Any] = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : str = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : List[str] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A : List[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class A ( __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
__magic_name__ = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__magic_name__ = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : int = TFFlaubertModelTester(self )
A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , emb_dim=37 )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Union[str, Any] = TFFlaubertModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_tf
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Tuple = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
A : List[str] = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
A : int = model(SCREAMING_SNAKE_CASE )[0]
A : str = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
A : Union[str, Any] = tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
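# Editorial note: the integration check above uses the usual pin-a-slice idiom -- only
# a small corner of the output tensor is compared against hard-coded reference values,
# with a loose atol so benign numerical drift across hardware does not fail the test.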
| 634
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
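# Editorial sketch (simplified, hypothetical code -- not this module's implementation):
# the _LazyModule indirection above defers the heavy framework imports until first
# attribute access, in the spirit of a module-level __getattr__ (PEP 562):
#
#     import importlib
#     def __getattr__(name):
#         module_for = {"VisionEncoderDecoderModel": ".modeling_vision_encoder_decoder"}
#         if name in module_for:
#             return getattr(importlib.import_module(module_for[name], __package__), name)
#         raise AttributeError(name)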
| 705
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list( split_dict : SplitDict ) -> None:
    '''simple docstring'''
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name="my_dataset" )] )
def test_split_dict_asdict_has_dataset_name( split_info : SplitInfo ) -> None:
    '''simple docstring'''
    split_dict_asdict = asdict(SplitDict({"train": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
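# --- Editorial sketch (reuses the same helpers the tests above exercise) ---
if __name__ == "__main__":
    sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1_3_3_7, num_examples=4_2)})
    assert SplitDict._from_yaml_list(sd._to_yaml_list()).keys() == sd.keys()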
| 582
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a: Tuple = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: str = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Optional[Any] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__a: str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 108
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_SCREAMING_SNAKE_CASE = logging.getLogger()
_SCREAMING_SNAKE_CASE = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[str] ):
"""simple docstring"""
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
UpperCamelCase = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowerCamelCase_ , f"""{split}.{field}""" ) , """w""" ) as f:
f.write(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : str = "pytorch" ):
"""simple docstring"""
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = os.path.join(lowerCamelCase_ , """output""" )
UpperCamelCase = os.path.join(lowerCamelCase_ , """data""" )
self._create_dummy_data(data_dir=lowerCamelCase_ )
UpperCamelCase = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowerCamelCase_ , env=self.get_env() )
UpperCamelCase = os.path.join(lowerCamelCase_ , """metrics.json""" )
with open(lowerCamelCase_ ) as f:
UpperCamelCase = json.load(lowerCamelCase_ )
return result
@require_torch_gpu
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
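# Editorial note: the harness above is an end-to-end smoke test -- it writes a tiny
# throwaway dataset, shells out to finetune_rag.py in a subprocess, then reads the
# exact-match score back from metrics.json; that is why each variant is gated behind
# the GPU/Ray requirement decorators.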
| 537
| 0
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure(config):
    config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=True )
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why does a cache dir per test function not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / '''cache'''
    test_hf_datasets_cache = test_hf_cache_home / '''datasets'''
    test_hf_metrics_cache = test_hf_cache_home / '''metrics'''
    test_hf_modules_cache = test_hf_cache_home / '''modules'''
    monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / '''downloads'''
    monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / '''downloads''' / '''extracted'''
    monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='''session''' )
def disable_tqdm_output():
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 is supported
    monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , True )
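# --- Editorial sketch (test name is hypothetical) ---
# Because the cache fixture above is autouse, any test collected under this conftest
# sees the redirected cache without requesting anything explicitly, e.g.:
#
#     def test_cache_is_redirected():
#         import datasets.config
#         assert str(datasets.config.HF_DATASETS_CACHE).endswith("datasets")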
| 416
|
from typing import List
from .keymap import KEYMAP, get_character
def _A ( _UpperCamelCase ):
def decorator(_UpperCamelCase ):
_UpperCAmelCase : Optional[int] = getattr(_UpperCamelCase , '''handle_key''' , [] )
handle += [key]
setattr(_UpperCamelCase , '''handle_key''' , _UpperCamelCase )
return func
return decorator
def _A ( *_UpperCamelCase ):
def decorator(_UpperCamelCase ):
_UpperCAmelCase : Any = getattr(_UpperCamelCase , '''handle_key''' , [] )
handle += keys
setattr(_UpperCamelCase , '''handle_key''' , _UpperCamelCase )
return func
return decorator
class lowerCAmelCase_ ( lowercase_ ):
def __new__( cls : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = super().__new__(cls , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if not hasattr(UpperCAmelCase_ , '''key_handler''' ):
setattr(UpperCAmelCase_ , '''key_handler''' , {} )
setattr(UpperCAmelCase_ , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCAmelCase : List[str] = getattr(UpperCAmelCase_ , '''handle_key''' , [] )
for key in handled_keys:
_UpperCAmelCase : Optional[Any] = value
return new_cls
@staticmethod
def a_ ( cls : Optional[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase : str = ord(UpperCAmelCase_ )
_UpperCAmelCase : str = cls.key_handler.get(UpperCAmelCase_ )
if handler:
_UpperCAmelCase : Optional[int] = char
return handler(cls )
else:
return None
def _A ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
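# --- Editorial sketch of the dispatch pattern above (all names here are hypothetical,
# not the library's API): a metaclass harvests methods tagged with a `handle_key`
# attribute into a per-class lookup table keyed by the character they handle. ---
class _MiniKeyHandlerMeta(type):
    def __new__(mcls, name, bases, attrs):
        new_cls = super().__new__(mcls, name, bases, attrs)
        new_cls.key_handler = {}
        for value in attrs.values():
            for key in getattr(value, "handle_key", []):
                new_cls.key_handler[key] = value
        return new_cls

class _MiniMenu(metaclass=_MiniKeyHandlerMeta):
    def move_up(self):
        return "up"

    move_up.handle_key = ["k"]

assert _MiniMenu.key_handler["k"](_MiniMenu()) == "up"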
| 416
| 1
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__A = logging.get_logger(__name__)
__A = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
__A = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__A = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__A = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
__A = OrderedDict(
[
        # Model for Image classification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
__A = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
__A = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
__A = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
__A = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
__A = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
__A = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
__A = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
__A = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
__A = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = FLAX_MODEL_MAPPING
__A = auto_class_update(FlaxAutoModel)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__A = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : int = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__A = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Dict = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__A = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__A = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__A = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Any = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__A = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : List[str] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__A = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : List[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__A = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Dict = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__A = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Tuple = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__A = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__A = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : List[str] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__A = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
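# Editorial usage note (checkpoint name is only an illustration, not pinned by this
# file):
#
#     from transformers import FlaxAutoModelForSequenceClassification
#     model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
#
# The _LazyAutoMapping tables above route the checkpoint's BertConfig to
# FlaxBertForSequenceClassification in this call.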
| 68
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
A = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 1_3_1_0_7_2,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
}
def lowerCamelCase ( UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
return torch.atana(UpperCamelCase , UpperCamelCase ) / math.pi * 2
def lowerCamelCase ( UpperCamelCase : str ) -> Union[str, Any]:
_lowerCamelCase = torch.sin(t * math.pi / 2 ) ** 2
_lowerCamelCase = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(UpperCamelCase , UpperCamelCase )
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , snake_case__ : Any ) -> Optional[Any]:
super().__init__()
_lowerCamelCase = DiffusionAttnUnetaD(snake_case__ , n_attn_layers=4 )
_lowerCamelCase = deepcopy(self.diffusion )
_lowerCamelCase = torch.quasirandom.SobolEngine(1 , scramble=snake_case__ )
def lowerCamelCase ( UpperCamelCase : List[Any] ) -> List[str]:
_lowerCamelCase = MODELS_MAP[model_name]['url']
os.system(F"""wget {url} ./""" )
return F"""./{model_name}.ckpt"""
A = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
A = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
A = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
A = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
A = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
A = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def lowerCamelCase ( UpperCamelCase : Tuple ) -> int:
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(F"""ResConvBlock error with {name}""" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def lowerCamelCase ( UpperCamelCase : Optional[Any] ) -> Tuple:
for key, value in ATTN_MAP.items():
if name.startswith(UpperCamelCase ) and not isinstance(UpperCamelCase , UpperCamelCase ):
return name.replace(UpperCamelCase , UpperCamelCase )
elif name.startswith(UpperCamelCase ):
return [name.replace(UpperCamelCase , UpperCamelCase ) for v in value]
raise ValueError(F"""Attn error with {name}""" )
def lowerCamelCase ( UpperCamelCase : Any , UpperCamelCase : int=13 ) -> Optional[int]:
_lowerCamelCase = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
_lowerCamelCase = 0
if string.startswith('net.3.' ):
depth += 1
_lowerCamelCase = string[6:]
elif string.startswith('net.' ):
_lowerCamelCase = string[4:]
while string.startswith('main.7.' ):
depth += 1
_lowerCamelCase = string[7:]
if string.startswith('main.' ):
_lowerCamelCase = string[5:]
# mid block
if string[:2].isdigit():
_lowerCamelCase = string[:2]
_lowerCamelCase = string[2:]
else:
_lowerCamelCase = string[0]
_lowerCamelCase = string[1:]
if depth == max_depth:
_lowerCamelCase = MID_NUM_TO_LAYER[layer_num]
_lowerCamelCase = 'mid_block'
elif depth > 0 and int(UpperCamelCase ) < 7:
_lowerCamelCase = DOWN_NUM_TO_LAYER[layer_num]
_lowerCamelCase = F"""down_blocks.{depth}"""
elif depth > 0 and int(UpperCamelCase ) > 7:
_lowerCamelCase = UP_NUM_TO_LAYER[layer_num]
_lowerCamelCase = F"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
_lowerCamelCase = DEPTH_0_TO_LAYER[layer_num]
_lowerCamelCase = F"""up_blocks.{max_depth - 1}""" if int(UpperCamelCase ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" )
_lowerCamelCase = string_left[1:]
if "resnets" in new_layer:
_lowerCamelCase = convert_resconv_naming(UpperCamelCase )
elif "attentions" in new_layer:
_lowerCamelCase = convert_attn_naming(UpperCamelCase )
_lowerCamelCase = new_string_left
if not isinstance(UpperCamelCase , UpperCamelCase ):
_lowerCamelCase = prefix + '.' + new_layer + '.' + string_left
else:
_lowerCamelCase = [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
def lowerCamelCase ( UpperCamelCase : List[Any] ) -> int:
_lowerCamelCase = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
            # up- and downsample layers don't have trainable weights
continue
_lowerCamelCase = rename(UpperCamelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(UpperCamelCase , UpperCamelCase ):
_lowerCamelCase = transform_conv_attns(UpperCamelCase , UpperCamelCase , UpperCamelCase )
else:
_lowerCamelCase = v
return new_state_dict
def lowerCamelCase ( UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : Dict ) -> Optional[Any]:
if len(UpperCamelCase ) == 1:
if len(v.shape ) == 3:
# weight
_lowerCamelCase = v[:, :, 0]
else:
# bias
_lowerCamelCase = v
else:
# qkv matrices
_lowerCamelCase = v.shape[0]
_lowerCamelCase = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
_lowerCamelCase = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
_lowerCamelCase = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def lowerCamelCase ( UpperCamelCase : Any ) -> Optional[Any]:
_lowerCamelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
_lowerCamelCase = args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
_lowerCamelCase = download(UpperCamelCase )
_lowerCamelCase = MODELS_MAP[model_name]['sample_rate']
_lowerCamelCase = MODELS_MAP[model_name]['sample_size']
_lowerCamelCase = Object()
_lowerCamelCase = sample_size
_lowerCamelCase = sample_rate
_lowerCamelCase = 0
_lowerCamelCase = UNetaDModel(sample_size=UpperCamelCase , sample_rate=UpperCamelCase )
_lowerCamelCase = diffusers_model.state_dict()
_lowerCamelCase = DiffusionUncond(UpperCamelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=UpperCamelCase )['state_dict'] )
_lowerCamelCase = orig_model.diffusion_ema.eval()
_lowerCamelCase = orig_model.state_dict()
_lowerCamelCase = rename_orig_weights(UpperCamelCase )
_lowerCamelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
_lowerCamelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(UpperCamelCase ) == 0, F"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith('kernel' ) for k in list(UpperCamelCase ) ), F"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
_lowerCamelCase = value.squeeze()
_lowerCamelCase = value
diffusers_model.load_state_dict(UpperCamelCase )
_lowerCamelCase = 1_00
_lowerCamelCase = 33
_lowerCamelCase = IPNDMScheduler(num_train_timesteps=UpperCamelCase )
_lowerCamelCase = torch.manual_seed(UpperCamelCase )
_lowerCamelCase = torch.randn([1, 2, config.sample_size] , generator=UpperCamelCase ).to(UpperCamelCase )
_lowerCamelCase = torch.linspace(1 , 0 , steps + 1 , device=UpperCamelCase )[:-1]
_lowerCamelCase = get_crash_schedule(UpperCamelCase )
_lowerCamelCase = DanceDiffusionPipeline(unet=UpperCamelCase , scheduler=UpperCamelCase )
_lowerCamelCase = torch.manual_seed(33 )
_lowerCamelCase = pipe(num_inference_steps=UpperCamelCase , generator=UpperCamelCase ).audios
_lowerCamelCase = sampling.iplms_sample(UpperCamelCase , UpperCamelCase , UpperCamelCase , {} )
_lowerCamelCase = generated.clamp(-1 , 1 )
_lowerCamelCase = (generated - audio).abs().sum()
_lowerCamelCase = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , UpperCamelCase )
print('Diff max' , UpperCamelCase )
assert diff_max < 1e-3, F"""Diff max: {diff_max} is too much :-/"""
print(F"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
A = parser.parse_args()
main(args)
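# Editorial invocation sketch (script filename and output path are placeholders):
#
#     python convert_dance_diffusion_to_diffusers.py \
#         --model_path gwf-440k --checkpoint_path ./dance-diffusion-gwf-440k
#
# When --model_path is one of the names in MODELS_MAP rather than a local file, the
# script downloads the matching checkpoint itself before converting.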
| 544
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE_ = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["""input_ids"""] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["""input_ids"""] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        # pad the attention mask up to max_decoder_length so it is valid for the cache
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class BlenderbotHeadTests( unittest.TestCase ):
    """simple docstring"""
    vocab_size = 9_9
    def _get_config_and_data( self ):
        '''simple docstring'''
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
    def test_lm_forward( self ):
        '''simple docstring'''
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape , expected_shape )
    def test_lm_uneven_forward( self ):
        '''simple docstring'''
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape , expected_shape )
    def test_shift_tokens_right( self ):
        '''simple docstring'''
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    """simple docstring"""
    is_encoder_decoder = True
    all_model_classes = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxBlenderbotSmallModelTester(self )
    def test_use_cache_forward( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
                prepared_inputs_dict = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
| 466
|
'''simple docstring'''
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int] ) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
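    # Hedged demonstration (added example, not in the original module); the sort
    # mutates the list in place and also returns it:
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))  # [2, 24, 45, 66, 75, 90, 170, 802]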
| 466
| 1
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_model_doc_toc( model_doc ):
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
def check_model_doc( overwrite=False ):
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['sections'] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
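    # Hedged usage note (assuming this file lives at utils/check_doc_toc.py in a
    # transformers checkout):
    #   python utils/check_doc_toc.py                      # check only, raise on problems
    #   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite the toctree in place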
| 544
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = ShapEImgaImgPipeline
    params = ['image']
    batch_params = ['image']
    required_optional_params = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size( self ):
        return 3_2
    @property
    def time_input_dim( self ):
        return 3_2
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def renderer_dim( self ):
        return 8
    @property
    def dummy_image_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
        return model
    @property
    def dummy_image_processor( self ):
        image_processor = CLIPImageProcessor(
            crop_size=2_2_4 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
        return image_processor
    @property
    def dummy_prior( self ):
        torch.manual_seed(0 )
        model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 1_6,
'embedding_dim': self.time_input_dim,
'num_embeddings': 3_2,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
        model = PriorTransformer(**model_kwargs )
        return model
    @property
    def dummy_renderer( self ):
        torch.manual_seed(0 )
        model_kwargs = {
'param_shapes': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 1_2,
'background': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components( self ):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1_0_2_4 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        input_image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 3_2,
'output_type': 'np',
}
        return inputs
    def test_shap_e( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
        expected_slice = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests( unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img( self ):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy' )
        pipe = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='np' , ).images[0]
        assert images.shape == (2_0, 6_4, 6_4, 3)
        assert_mean_pixel_difference(images , expected_image )
| 544
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class RoCBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "roc_bert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , enable_pronunciation=True , enable_shape=True , pronunciation_embed_dim=768 , pronunciation_vocab_size=910 , shape_embed_dim=512 , shape_vocab_size=24858 , concat_input=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
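# Hedged usage sketch (added example; the hyperparameters below are arbitrary
# illustrations, not values from a released checkpoint):
#   config = RoCBertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#   config.save_pretrained("./tiny-roc-bert")  # writes config.json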
| 716
|
import cmath
import math
def apparent_power( voltage: float , current: float , voltage_angle: float , current_angle: float ) -> complex:
    # Convert the angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle )
    current_angle_rad = math.radians(current_angle )
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage , voltage_angle_rad )
    current_rect = cmath.rect(current , current_angle_rad )
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
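    # Worked example (added for illustration): 100 V at 30 degrees times 5 A at
    # -30 degrees gives about 500 VA at 0 degrees, i.e. roughly (500+0j).
    print(apparent_power(100, 5, 30, -30))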
| 698
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''dpr'''
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim: int = 0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
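# Hedged usage sketch (added example): the same config class parametrizes the
# DPR context encoder, question encoder and reader:
#   config = DPRConfig(projection_dim=128)
#   assert config.projection_dim == 128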
| 379
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''fnet'''
    def __init__( self , vocab_size=3_20_00 , hidden_size=7_68 , num_hidden_layers=12 , intermediate_size=30_72 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=5_12 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
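# Hedged usage sketch (added example; values are illustrative only):
#   config = FNetConfig(use_tpu_fourier_optimizations=True, tpu_short_seq_length=256)
#   assert config.hidden_act == "gelu_new"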
| 379
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_UpperCamelCase : Optional[Any] = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Union[str, Any] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710
|
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards( kwargs , expected ):
    """simple docstring"""
    out = _distribute_shards(**kwargs )
    assert out == expected
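# For intuition (comment added alongside the original tests): with num_shards=10
# and max_num_jobs=3 the shards are split as evenly as possible into
# [range(0, 4), range(4, 7), range(7, 10)], earlier jobs absorbing the remainder.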
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def test_split_gen_kwargs( gen_kwargs , max_num_jobs , expected ):
    """simple docstring"""
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs( gen_kwargs , expected ):
    """simple docstring"""
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
| 514
| 0
|
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key( k , patterns ):
    '''simple docstring'''
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
def convert_bigbird_pegasus( tf_weights: dict , config_update: dict ):
    '''simple docstring'''
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
    for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict:
            raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop('''model.embed_positions.weight''' )
    missing, extra = torch_model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], F'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy( path ):
    '''simple docstring'''
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['''global_step''']
    for name, shape in tqdm(init_vars , desc='''converting tf checkpoint to dict''' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path: str , save_dir: str , config_update: dict ):
    '''simple docstring'''
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
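    # Hedged usage sketch (paths are placeholders):
    #   python convert_bigbird_pegasus_tf_to_pytorch.py \
    #       --tf_ckpt_path /path/to/tf/checkpoint --save_dir ./bigbird-pegasus-converted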
| 179
|
'''simple docstring'''
def compute_ap(l ): # noqa: E741
    '''simple docstring'''
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
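# For the sample graph above the printed articulation points should be 2, 3 and
# 5: removing any of them disconnects at least one vertex from the rest.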
| 173
| 0
|
import functools
def mincost_tickets( days: list[int] , costs: list[int] ) -> int:
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("The parameter days should be a list of integers" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("The parameter costs should be a list of three integers" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("All days elements should be greater than 0" )
    if max(days ) >= 3_66:
        raise ValueError("All days elements should be less than 366" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index: int ) -> int:
        if index > 3_65:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
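    # Hedged example (the classic travel-pass case): covering these travel days
    # with 1-, 7- and 30-day passes priced [2, 7, 15] costs 11 at minimum.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))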
| 716
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
    auxiliary_in_channels = 3_84
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 5_12
    elif "large" in model_name:
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 7_68
    # set label information
    num_labels = 1_50
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=["stage1", "stage2", "stage3", "stage4"] , )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys( config ):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v( state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
            in_proj_bias = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim :, :]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
def correct_unfold_reduction_order( x ):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order( x ):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order( x ):
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order( x ):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    model_name_to_url = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" , file_name=model_name )[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name , param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("bn" , "batch_norm" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                value = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                value = reverse_correct_unfold_norm_order(value )
            state_dict[key] = value
    model.load_state_dict(state_dict )
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print(logits.shape )
    print("First values of logits:" , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.19_21, -7.19_21, -6.95_32], [-7.19_21, -7.19_21, -6.95_32], [-7.09_08, -7.09_08, -6.85_34]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.58_51, -6.58_51, -6.43_30], [-6.58_51, -6.58_51, -6.43_30], [-6.47_63, -6.47_63, -6.32_54]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.52_97, -7.52_97, -7.38_02], [-7.52_97, -7.52_97, -7.38_02], [-7.40_44, -7.40_44, -7.25_86]] )
    print("Logits:" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'''Pushing model and processor for {model_name} to hub''' )
        model.push_to_hub(F'''openmmlab/{model_name}''' )
        processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[F"upernet-swin-{size}" for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
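    # Hedged usage sketch (the script name and output path are assumptions):
    #   python convert_swin_upernet_to_pytorch.py --model_name upernet-swin-tiny \
    #       --pytorch_dump_folder_path ./upernet-swin-tiny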
| 286
| 0
|
def xnor_gate( input_1: int , input_2: int ) -> int:
    """simple docstring"""
    return 1 if input_1 == input_2 else 0
def test_xnor_gate( ) -> None:
    """simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 511
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts( TestCase ):
    """simple docstring"""
    def _no_encoding_on_file_open( self , filepath: str ):
        """simple docstring"""
        with open(filepath , encoding="""utf-8""" ) as input_file:
            regexp = re.compile(R"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match
    def _no_print_statements( self , filepath: str ):
        """simple docstring"""
        with open(filepath , encoding="""utf-8""" ) as input_file:
            regexp = re.compile(R"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open( self ):
        """simple docstring"""
        dataset_paths = Path("""./datasets""" )
        dataset_files = list(dataset_paths.absolute().glob("""**/*.py""" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )
    def test_no_print_statements( self ):
        """simple docstring"""
        dataset_paths = Path("""./datasets""" )
        dataset_files = list(dataset_paths.absolute().glob("""**/*.py""" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 279
| 0
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""Construct a "fast" tokenizer for the DPR context encoder; identical to `BertTokenizerFast`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""Construct a "fast" tokenizer for the DPR question encoder; identical to `BertTokenizerFast`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """Mixin adding passage encoding (question + title + text) and answer-span decoding for the DPR reader."""
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Get the span predictions for the extractive Q&A model, ordered by descending relevance."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Find the best non-overlapping answer spans of one passage from its start/end logits."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""Construct a "fast" tokenizer for the DPR reader; combines the span-decoding mixin with `BertTokenizerFast`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
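# Minimal usage sketch (the DPRReader model class and the checkpoint name come from the same library;
# exact output contents are assumed):
#   from transformers import DPRReader
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded_inputs)
#   predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)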
| 626
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
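# Example of what `_re_checkpoint` extracts (derived from the comment above; verified against the pattern):
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]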
| 626
| 1
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0522, type=int)
    args = parser.parse_args()
logger.info(f'Loading data from {args.data_file}')
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    # counts[token_id] holds the number of occurrences of that token in the binarized dataset
    counts = [0] * args.vocab_size
for k, v in counter.items():
        counts[k] = v
logger.info(f'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
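# Reading the dump back (a sketch; mirrors the pickle protocol used above):
#   with open("data/token_counts.bert-base-uncased.pickle", "rb") as fp:
#       token_counts = pickle.load(fp)  # token_counts[token_id] -> number of occurrences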
| 593
|
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    # config attribute names follow the Swin2SRConfig fields used by the conversion script
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
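# Worked example of the renaming rules above (derived by applying them in order):
#   "layers.0.residual_group.blocks.1.attn.proj.weight"
#   -> "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight"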
def convert_state_dict(orig_state_dict, config):
    # the target key names below are reconstructed from the rename patterns used in `rename_key`
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                # split the fused qkv projection into separate query/key/value weights
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")
    url_to_name = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
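# Hedged CLI sketch (the script filename is an assumption; the default checkpoint URL is shown above):
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64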
| 593
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : List[str] = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # map the timm weights onto the HF model key by key (orders are assumed to line up)
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
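# Hedged CLI sketch (the script filename is an assumption; the flags come from the parser above):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/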
| 302
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS; edge weights are restricted to 0 or 1."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: weight-0 edges go to the front of the deque, weight-1 edges to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')

        return distances[finish_vertex]
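# Small usage sketch (not part of the original module):
#   g = AdjacencyList(3)
#   g.add_edge(0, 1, 0)   # weight-0 edge: explored first via appendleft
#   g.add_edge(1, 2, 1)
#   g.get_shortest_path(0, 2)  # -> 1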
if __name__ == "__main__":
import doctest
doctest.testmod()
| 302
| 1
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        # the three config fields set to 0/None/0 in the original are assumed to be these generation settings
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
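# Hedged CLI sketch (the script filename is an assumption; the flags come from parse_args above):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base --device cpu --output_file_path BART.onnx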
| 397
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float tensor of the given shape as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )
@require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )
@require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float64).sum() - input_pt.numpy().astype(np.float64).sum()) < 1e-2)
    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
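# To run only this module (a sketch; the repository-relative test path is an assumption):
#   pytest tests/models/speecht5/test_feature_extraction_speecht5.py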
| 397
| 1
|
"""simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters of the generated triangles until the perimeter exceeds `max_perimeter`."""
    prev_value = 1
    value = 2
    i = 0
    perimeter = 0
    perimeters_sum = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(F'''{solution() = }''')
| 497
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        prompt = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
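# A note on the two prompt-embeds tests above: they encode the same prompts
# manually through pipe.tokenizer / pipe.text_encoder and assert that the
# resulting images match the plain-text path to within 1e-4, which is what
# makes pre-computed `prompt_embeds` a drop-in replacement for `prompt`.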
| 497
| 1
|
'''simple docstring'''
def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
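# Quick example (illustrative, mirrors the CLI above):
#   >>> selection_sort([5, 2, 4, 1])
#   [1, 2, 4, 5]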
| 597
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a fixed ring of doubly linked nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        """Return the item at the front of the queue without removing it."""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('''Empty Queue''')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('''Full Queue''')


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
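# Illustrative session (assumes the default ring of 6 nodes created above):
#   q = CircularQueueLinkedList()
#   q.enqueue("a"); q.enqueue("b")
#   q.first()    # -> "a"
#   q.dequeue()  # -> "a"
#   q.dequeue()  # -> "b"; a further dequeue() raises Exception("Empty Queue")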
| 322
| 0
|
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.",
    )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How many images to generate.",
    )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.",
    )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols) -> Image.Image:
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
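# Example invocation (hypothetical script filename and paths, for orientation only):
#   python text2images.py -m ./quantized-sd-model -c "robotic cat with wings" -n 4 -s 42
# If <model_dir>/best_model.pt exists, the Intel Neural Compressor checkpoint is
# loaded into the UNet and the pipeline follows the UNet's current device;
# otherwise the UNet is moved to the selected CUDA device first.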
| 720
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"""{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"""
    assert gathered_obj == list(range(state.num_processes)), f"""{gathered_obj} != {list(range(state.num_processes))}"""


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, """sum""")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"""{reduced_tensor} != {truth_tensor}"""


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, """mean""")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"""{reduced_tensor} != {truth_tensor}"""


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"""State: {state}""")
    state.print("""testing gather""")
    test_gather(state)
    state.print("""testing gather_object""")
    test_gather_object(state)
    state.print("""testing broadcast""")
    test_broadcast(state)
    state.print("""testing pad_across_processes""")
    test_pad_across_processes(state)
    state.print("""testing reduce_sum""")
    test_reduce_sum(state)
    state.print("""testing reduce_mean""")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
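# These checks only exercise the distributed ops when launched on multiple
# processes, e.g. (assumed launcher invocation and filename):
#   accelerate launch --num_processes 2 test_ops.py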
| 507
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
@property
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
return 32
@property
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
return 32
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
return 100
@property
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        lowerCamelCase_ = UNet2DConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.dummy_unet
lowerCamelCase_ = self.dummy_movq
lowerCamelCase_ = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase_ = DDIMScheduler(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE_ )
# create init_image
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ = Image.fromarray(np.uint8(SCREAMING_SNAKE_CASE_ ) ).convert('RGB' ).resize((256, 256) )
# create hint
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
lowerCamelCase_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = 'cpu'
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = output.images
lowerCamelCase_ = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up the GPU memory after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase_ = init_image.resize((512, 512) )
lowerCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase_ = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE_ ) ).float() / 255.0
lowerCamelCase_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowerCamelCase_ = 'A robot, 4k photo'
        lowerCamelCase_ = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
pipe_prior.to(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.float16 )
lowerCamelCase_ = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase_ ,lowerCamelCase_ = pipe_prior(
SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , strength=0.85 , generator=SCREAMING_SNAKE_CASE_ , negative_prompt='' , ).to_tuple()
lowerCamelCase_ = pipeline(
image=SCREAMING_SNAKE_CASE_ , image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , hint=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='np' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 42
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
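# Note: the selectors above ("organicJob", "jobTitle", the "company" span)
# mirror Indeed's markup at the time of writing and may need updating if the
# site's HTML changes; the generator yields (job_title, company_name) pairs
# lazily as they are parsed.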
| 155
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xlnet"""] = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xlnet_fast"""] = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xlnet"""] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xlnet"""] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
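# How the lazy pattern above behaves at runtime: importing the package only
# builds _import_structure; the heavy torch/TF modules are imported the first
# time one of their attributes is resolved. Illustrative usage (assumed path):
#   from transformers.models.xlnet import XLNetConfig   # cheap
#   from transformers.models.xlnet import XLNetModel    # triggers the torch import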
| 233
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = 'xlm-roberta'

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12,
                 pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
                 classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
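# Minimal usage sketch (assumed, given the class names restored above):
#   config = XLMRobertaConfig(vocab_size=250_002)
#   XLMRobertaOnnxConfig(config).inputs  # OrderedDict with dynamic batch/sequence axes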
| 233
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    """DistilBERT model configuration."""
    model_type = '''distilbert'''
    attribute_map = {
        '''hidden_size''': '''dim''',
        '''num_attention_heads''': '''n_heads''',
        '''num_hidden_layers''': '''n_layers''',
    }

    def __init__(self, vocab_size=30_522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6,
                 n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu",
                 initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
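# Usage sketch (assumed): the attribute_map lets BERT-style names resolve to
# DistilBERT's own fields, e.g.
#   config = DistilBertConfig()
#   config.hidden_size == config.dim             # True (768)
#   config.num_hidden_layers == config.n_layers  # True (6)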
| 30
|
def pancake_sort(arr: list) -> list:
    """Sort a list with the pancake sort algorithm (prefix reversals only)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first `cur` elements, sinking the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
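# Worked example (illustrative): pancake_sort([3, 1, 2])
#   cur=3: flip max 3 to the front (no-op here), then flip the first 3 -> [2, 1, 3]
#   cur=2: flip max 2 to the front (no-op here), then flip the first 2 -> [1, 2, 3]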
| 176
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
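# Subcommands wired above (illustrative invocations):
#   accelerate config           # interactive questionnaire via config_command_parser
#   accelerate config default   # write a default config file
#   accelerate config update    # rewrite an existing config file in the latest format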
| 493
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def lowercase ( self ):
pass
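# With the toy vocab above, WordPiece splits "UNwant\u00E9d,running" into
# ["un", "##want", "##ed", ",", "runn", "##ing"], i.e. ids [7, 4, 5, 10, 8, 9]
# (lower-casing and accent-stripping happen in the basic tokenizer first).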
| 493
| 1
|
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (3_2, 3_2)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 3_2, 3_2)

    @property
    def output_shape(self):
        return (3, 3_2, 3_2)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            '''block_out_channels''': [3_2, 6_4],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
_lowerCamelCase : List[str] = self.model_class(**__lowerCAmelCase )
model.to(__lowerCAmelCase )
assert not model.is_gradient_checkpointing and model.training
_lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_lowerCamelCase : List[Any] = torch.randn_like(__lowerCAmelCase )
_lowerCamelCase : Tuple = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_lowerCamelCase : List[Any] = self.model_class(**__lowerCAmelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__lowerCAmelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_lowerCamelCase : List[Any] = model_a(**__lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_lowerCamelCase : Any = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
_lowerCamelCase : Optional[int] = dict(model.named_parameters() )
_lowerCamelCase : Any = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[Any] = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
_lowerCamelCase : int = model.to(__lowerCAmelCase )
model.eval()
if torch_device == "mps":
_lowerCamelCase : int = torch.manual_seed(0 )
else:
_lowerCamelCase : str = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
_lowerCamelCase : int = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCamelCase : Optional[int] = image.to(__lowerCAmelCase )
with torch.no_grad():
_lowerCamelCase : str = model(__lowerCAmelCase , sample_posterior=__lowerCAmelCase , generator=__lowerCAmelCase ).sample
_lowerCamelCase : List[str] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_lowerCamelCase : str = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
_lowerCamelCase : Optional[int] = torch.tensor(
[-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
else:
_lowerCamelCase : List[str] = torch.tensor(
[-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1E-2 ) )
@slow
class __snake_case ( unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f'''gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape] )}.npy'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 5_1_2, 5_1_2), fpaa=False):
        # `fpaa` toggles float16 inputs (name kept for compatibility with the call sites below)
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fpaa=False):
        revision = '''fp16''' if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id, subfolder='''vae''', torch_dtype=torch_dtype, revision=revision,
        )
        model.to(torch_device).eval()
        return model
    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[4_7, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.get_sd_vae_model()
_lowerCamelCase : List[str] = self.get_sd_image(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = self.get_generator(__lowerCAmelCase )
with torch.no_grad():
_lowerCamelCase : Any = model(__lowerCAmelCase , generator=__lowerCAmelCase , sample_posterior=__lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCamelCase : Optional[int] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCamelCase : Tuple = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[4_7, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_sd_vae_model(fpaa=__lowerCAmelCase )
_lowerCamelCase : int = self.get_sd_image(__lowerCAmelCase , fpaa=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = self.get_generator(__lowerCAmelCase )
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , generator=__lowerCAmelCase , sample_posterior=__lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCamelCase : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCamelCase : Optional[Any] = torch.tensor(__lowerCAmelCase )
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[4_7, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_sd_vae_model()
_lowerCamelCase : int = self.get_sd_image(__lowerCAmelCase )
with torch.no_grad():
_lowerCamelCase : List[str] = model(__lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCamelCase : Optional[int] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCamelCase : Tuple = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=3E-3 )
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)
    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
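
# A minimal sketch of the shape contract asserted in the encode test above: the
# SD VAE downsamples spatially by 8x and produces 4 latent channels, so a
# [B, 3, H, W] image encodes to [B, 4, H // 8, W // 8]. Plain Python, no model
# required; the 512x512 input size is an assumption matching the decode tests.
def _expected_latent_shape(image_shape):
    batch, _, height, width = image_shape
    return [batch, 4, height // 8, width // 8]


assert _expected_latent_shape((4, 3, 512, 512)) == [4, 4, 64, 64]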
| 83
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` places when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
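
    # A hedged sanity check of the rounding behaviour (approximate comparisons
    # on purpose -- binary floats make exact printed values platform-noisy):
    import math

    assert math.isclose(decimal_isolate(35.345, 3), 0.345, abs_tol=1e-9)
    assert math.isclose(decimal_isolate(-14.123, 2), -0.12, abs_tol=1e-9)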
| 279
| 0
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Wraps an EncodecFeatureExtractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
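
# A minimal numpy sketch of the padding-mask trick in _decode_audio above:
# frames where the (right-padded) mask equals the feature extractor's padding
# value are sliced out of each generated waveform. Values are made up for
# illustration, and padding_value == 0 is an assumption here.
_mask = np.array([[1, 1, 1, 0]])  # 1 = real audio frame, 0 = padding
_audio = np.arange(4, dtype=float)[None, None, :]  # (bsz=1, channels=1, seq_len=4)
_kept = _audio[0][_mask[0][None, :] != 0].reshape(1, -1)
assert _kept.shape == (1, 3)  # the padded frame was dropped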
| 413
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature extractor configuration of a pretrained model as a dict."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """Factory that instantiates the right feature extractor class via `from_pretrained`."""

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
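
# A short usage sketch for AutoFeatureExtractor (kept as a comment since this
# module is transformers itself). The checkpoint name is an assumption for
# illustration; any repo shipping a preprocessor config with a
# `feature_extractor_type` key resolves the same way:
#
#   from transformers import AutoFeatureExtractor
#
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   print(type(feature_extractor).__name__)  # e.g. Wav2Vec2FeatureExtractor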
| 413
| 1
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
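
# Quick check against the LAYERS_TO_COPY table above: distilling a 12-layer
# teacher into a 3-layer student keeps the first, middle and last teacher layers.
assert pick_layers_to_copy(n_student=3, n_teacher=12) == [0, 6, 11]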
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
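
# And the matching intermediate-supervision targets come straight from
# LAYERS_TO_SUPERVISE:
assert get_layers_to_supervise(n_student=3, n_teacher=12) == [3, 7, 11]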
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
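    # Example invocation (a sketch: fire exposes the keyword arguments above as
    # CLI flags, and the BART checkpoint name is an assumption):
    #   python make_student.py --teacher facebook/bart-large-cnn --save_path student_dir --e 6 --d 3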
| 40
|
'''simple docstring'''
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the 1000-digit number."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
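
    # For reference, this is expected to print `solution() = 23514624000`, the
    # known Project Euler #8 answer for a thirteen-digit window (a hedged note,
    # not a verified fixture of this file).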
| 467
| 0
|
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
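
# Worked example of what the prefix table encodes: for i = j = 0 the HF
# Diffusers prefix "down_blocks.0.resnets.0." pairs with SD's
# "input_blocks.1.0." (3*i + j + 1 == 1). Checking against the list just built:
assert ("input_blocks.1.0.", "down_blocks.0.resnets.0.") in unet_conversion_map_layer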
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
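
# Shape sketch for the helper above: a [out_features, in_features] linear
# weight from the HF VAE attention becomes a 1x1 conv weight [out, in, 1, 1]
# in SD format. The 512 size is an arbitrary illustration.
_w = torch.randn(512, 512)
assert reshape_weight_for_sd(_w).shape == (512, 512, 1, 1)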
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
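
# A small sketch of the q/k/v fusion in convert_text_enc_state_dict_v20 above:
# three [d, d] projection weights are concatenated into one [3d, d] in_proj
# matrix, the layout the original OpenCLIP attention expects. Sizes are
# illustrative only.
_q, _k, _v = torch.randn(4, 4), torch.randn(4, 4), torch.randn(4, 4)
assert torch.cat([_q, _k, _v]).shape == (12, 4)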
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights using safetensors; default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
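
    # Example invocation (a sketch; the script filename and paths are
    # placeholders):
    #   python convert_diffusers_to_original_stable_diffusion.py \
    #       --model_path ./my-diffusers-model --checkpoint_path ./model.ckpt --half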
| 573
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
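
# Note on the shape asserted throughout these tests: ViViT pixel_values are laid
# out as (batch, num_frames, num_channels, height, width). With the tester
# defaults above that is (7, 10, 3, 18, 18) for batched inputs and
# (1, 10, 3, 18, 18) for a single video after resize and 18x18 center crop.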
| 573
| 1
|
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Any = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Optional[int] = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Optional[Any] = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : int = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : str = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : str = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Optional[int] = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Optional[Any] = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Dict = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : List[str] = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Dict = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ["torch"] )
def lowerCAmelCase__ ( *_a : int , **_a : Dict ):
requires_backends(_a , ["torch"] )
def lowerCAmelCase__ ( *_a : int , **_a : List[Any] ):
requires_backends(_a , ["torch"] )
def lowerCAmelCase__ ( *_a : Tuple , **_a : int ):
requires_backends(_a , ["torch"] )
def lowerCAmelCase__ ( *_a : str , **_a : int ):
requires_backends(_a , ["torch"] )
def lowerCAmelCase__ ( *_a : List[str] , **_a : Optional[Any] ):
requires_backends(_a , ["torch"] )
def lowerCAmelCase__ ( *_a : int , **_a : Dict ):
requires_backends(_a , ["torch"] )
def lowerCAmelCase__ ( *_a : Tuple , **_a : Union[str, Any] ):
requires_backends(_a , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : List[str] = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Any = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : int = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : int = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Optional[Any] = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Tuple = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : List[str] = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Union[str, Any] = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Union[str, Any] = ['torch']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _lowerCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(cls , ["torch"] )
# The original file continues with one placeholder class per torch-backed
# object, each repeating the identical pattern below verbatim. The individual
# class names were not recoverable from the scrambled source, so a single
# representative definition is kept; `DummyObject` and `requires_backends`
# come from the library's utils module, imported at the top of this file.
class TorchBackedDummyObject(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key(key):
    """Map an OpenAI Jukebox state-dict key onto the transformers naming scheme."""
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
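# Illustrative example of the mapping above (not part of the original file):
# replace_key("vqvae.bottleneck.level_blocks.0.k")
#   -> "vqvae.bottleneck.level_blocks.0.codebook"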
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
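# Illustrative walk-through of one rename performed above (not in the original
# file): "encoders.0.level_blocks.0.model.2.3.weight" fullmatches
# re_encoder_block_conv_in with groups ("0", "0", "2", "3", "weight"), giving
# block_index = 2 * 2 + 3 = 7 and the new key
# "encoders.0.level_blocks.0.downsample_block.7.weight".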
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original Jukebox checkpoints and convert them to the transformers format."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        # Key-suffix expansions below are reconstructed from the upstream
        # conversion script; the scrambled source had lost the new key names.
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
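# Example invocation (illustrative; the script filename is an assumption and
# the checkpoint files must be reachable):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted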
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ConvBERT tokenizer, backed by the tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
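# Illustrative usage (assumes the YituTech checkpoint is downloadable):
# tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
# tokenizer("lower newer")["input_ids"]  # ids with [CLS]/[SEP] added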
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
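# Illustrative usage (not in the original file): the attribute_map above lets
# the generic names resolve to XLM-specific attributes.
# config = XLMConfig(emb_dim=1024, n_layers=6)
# assert config.hidden_size == 1024  # alias for config.emb_dim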
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features from [min_value, max_value] to output_range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert scale_features: map network outputs back to the feature range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
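# Quick numeric check of the scaling helpers above (illustrative, assuming a
# constructed `pipe` instance): values in [min_value, max_value] map onto
# [-1, 1] and back without loss.
# x = torch.tensor([[math.log(1e-5), 4.0]])
# y = pipe.scale_features(x, output_range=[-1.0, 1.0])  # -> [[-1.0, 1.0]]
# pipe.scale_to_features(y, input_range=[-1.0, 1.0])    # -> x, up to fp error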
from sklearn.metrics import recall_score
import datasets
a__ : str = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
a__ : Dict = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0`, or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
a__ : List[Any] = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
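# Illustrative usage (mirrors the classmethod above):
# text_config = AltCLIPTextConfig()
# vision_config = AltCLIPVisionConfig()
# config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
# config.projection_dim  # -> 768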
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
"""simple docstring"""
from __future__ import annotations
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ):
lowercase_ : List[Any] = []
lowercase_ : Optional[Any] = []
lowercase_ : List[str] = 0
lowercase_ : Any = sum(__SCREAMING_SNAKE_CASE )
create_state_space_tree(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return result
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , ):
if sum(__SCREAMING_SNAKE_CASE ) > max_sum or (remaining_nums_sum + sum(__SCREAMING_SNAKE_CASE )) < max_sum:
return
if sum(__SCREAMING_SNAKE_CASE ) == max_sum:
result.append(__SCREAMING_SNAKE_CASE )
return
for index in range(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) ):
create_state_space_tree(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , index + 1 , [*path, nums[index]] , __SCREAMING_SNAKE_CASE , remaining_nums_sum - nums[index] , )
__SCREAMING_SNAKE_CASE =[3, 34, 4, 12, 5, 2]
__SCREAMING_SNAKE_CASE =9
__SCREAMING_SNAKE_CASE =generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
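# For the example above, the two subsets of [3, 34, 4, 12, 5, 2] that sum to 9
# are [3, 4, 2] and [4, 5], so the script prints: [3, 4, 2] [4, 5]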
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """An early-exit "highway": pools one intermediate layer's output and classifies it."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
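
if __name__ == "__main__":
    # Minimal smoke test of the early-exit model. Illustrative only: the config
    # values and the 0.5 entropy threshold are assumptions, not from the
    # original DeeBERT training setup.
    from transformers import BertConfig

    config = BertConfig(num_hidden_layers=4, num_labels=2)
    model = DeeBertForSequenceClassification(config)
    # At inference time, exit as soon as a highway classifier's entropy drops below 0.5.
    model.bert.encoder.set_early_exit_entropy(0.5)
    model.eval()

    input_ids = torch.tensor([[101, 7592, 2088, 102]])  # toy token ids
    with torch.no_grad():
        outputs = model(input_ids=input_ids)
    print(outputs[0].shape)  # logits: (1, 2), whether or not an early exit fired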
def partition(m: int) -> int:
    """Count the integer partitions of m with a bottom-up memo table.

    For example partition(3) == 3, since 3 = 2 + 1 = 1 + 1 + 1; the first few
    values are p(1)=1, p(2)=2, p(3)=3, p(4)=5.
    """
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    """Bifid cipher: fractionate letters through a 5x5 Polybius square ("j" shares a cell with "i")."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based [row, column] pair of `letter` in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based position [index1, index2] of the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode `message`: write coordinates row-wise into a 2xN grid, then read them back off in pairs."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Invert `encode` by writing pairs sequentially and reading column-wise."""
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
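
if __name__ == "__main__":
    # Illustrative round-trip check: decoding an encoded message recovers the
    # original, as long as it contains no spaces or "j" (which the 5x5 square
    # folds into "i").
    cipher = BifidCipher()
    encoded = cipher.encode("testmessage")
    assert cipher.decode(encoded) == "testmessage"
    print(encoded)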
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion that keeps the
    image composition stable across output resolutions for a fixed seed.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
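
# Illustrative usage sketch (checkpoint id assumed; real weights must be
# downloaded before the pipeline can run, so this is left as comments):
#
#     pipe = SeedResizeStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     generator = torch.Generator("cuda").manual_seed(0)
#     image = pipe("an astronaut riding a horse", height=512, width=768, generator=generator).images[0]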
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")

    return name
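
# Example of the mapping above on a hypothetical checkpoint key:
#   "visual.transformer.resblocks.0.ln_1.weight"
#     -> "vision_model.encoder.layers.0.layer_norm1.weight"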
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    prefix = f"vision_model.encoder.layers.{layer_num}.message_attn"
                else:
                    prefix = f"vision_model.encoder.layers.{layer_num}.self_attn"
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                prefix = f"mit.encoder.layers.{layer_num}.self_attn"
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                prefix = f"text_model.encoder.layers.{layer_num}.self_attn"

            # split the fused in_proj weight/bias into separate query, key and value projections
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
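
# Example invocation (script filename and output path are illustrative):
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 --pytorch_dump_folder_path ./xclip-base-patch32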
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
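
# Example of the renaming above on a hypothetical checkpoint key:
#   "blocks.3.att.time_mix_k" -> "rwkv.blocks.3.attention.time_mix_key"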
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
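
# Example invocation (repo, file and output names are illustrative):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth --output_dir ./rwkv-4-169m-pile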
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the profile of the authenticated GitHub user."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"""{key}: {value}""")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
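
# The request above is equivalent to (with the token assumed to be exported):
#   curl -H "Authorization: token $USER_TOKEN" \
#        -H "Accept: application/vnd.github.v3+json" https://api.github.com/user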
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when an uninstalled format type (or one of its aliases) is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'"
        )
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
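
# Usage sketch: with this builder available as the "pandas" packaged module of the
# `datasets` library, pickled DataFrames load directly. The file path below is
# hypothetical.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("pandas", data_files={"train": "train.pkl"})
    print(dataset["train"])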
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
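
# Usage sketch: instantiating the config and deriving its ONNX input axes via
# `OnnxConfig.from_model_config` (a standard `transformers.onnx` entry point;
# its availability in your installed version is an assumption).
if __name__ == "__main__":
    config = Data2VecTextConfig()
    onnx_config = Data2VecTextOnnxConfig.from_model_config(config)
    print(config.model_type, dict(onnx_config.inputs))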
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DetaImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak the fairseq OPT weights into the HF OPT structure."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
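
# Usage sketch (script name and paths are hypothetical):
#
#     python convert_opt_checkpoint.py \
#         --fairseq_path /path/to/model.pt \
#         --pytorch_dump_folder_path ./opt-dump \
#         --hf_config facebook/opt-350m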
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
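
# Usage sketch: the same benchmark API outside the test suite (assumes PyTorch is
# installed; the model name is reused from the tests above).
if __name__ == "__main__":
    args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    print(PyTorchBenchmark(args).run())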
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the item located at `self._loader_batch_index` within the current batch."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
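
# Usage sketch: extracting log-mel features from 16 kHz audio. The audio array
# here is synthetic silence, used only to show the output shape.
if __name__ == "__main__":
    extractor = WhisperFeatureExtractor()
    audio = np.zeros(16000)  # 1 second of silence at 16 kHz
    features = extractor(audio, sampling_rate=16000, return_tensors="np")
    print(features["input_features"].shape)  # (1, 80, 3000) after padding to 30 s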
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_SCREAMING_SNAKE_CASE = """
import os
"""
_SCREAMING_SNAKE_CASE = """
def foo():
import os
return False
"""
_SCREAMING_SNAKE_CASE = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
_SCREAMING_SNAKE_CASE = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
_SCREAMING_SNAKE_CASE = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
_SCREAMING_SNAKE_CASE = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
_SCREAMING_SNAKE_CASE = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
_SCREAMING_SNAKE_CASE = """
import os
try:
import bar
except:
raise ValueError()
"""
_SCREAMING_SNAKE_CASE = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
_SCREAMING_SNAKE_CASE = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
_SCREAMING_SNAKE_CASE = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" , SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
__snake_case = os.path.join(SCREAMING_SNAKE_CASE , "test_file.py" )
with open(SCREAMING_SNAKE_CASE , "w" ) as _tmp_file:
_tmp_file.write(SCREAMING_SNAKE_CASE )
__snake_case = get_imports(SCREAMING_SNAKE_CASE )
assert parsed_imports == ["os"]
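
# Usage sketch: `get_imports` statically scans a module file and returns its
# top-level, non-try-guarded imports. The temporary file below is only for the demo.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write(TOP_LEVEL_IMPORT)
    print(get_imports(f.name))  # -> ['os']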
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
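
# Usage sketch: the `attribute_map` above lets generic code read `hidden_size`
# and `num_attention_heads` from the encoder-specific fields.
if __name__ == "__main__":
    config = PegasusConfig(d_model=512, encoder_attention_heads=8)
    print(config.hidden_size, config.num_attention_heads)  # 512 8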
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Recursively sorts the first `n` elements of `collection` in place."""
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Inserts collection[index] into its sorted position among the preceding elements."""
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'

_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n           Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n           Kern, Robert and Larson, Eric and Carey, C J and\n           Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n           Harris, Charles R. and Archibald, Anne M. and\n           Ribeiro, Antonio H. and Pedregosa, Fabian and\n           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n           Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Return μ(number), the value of the Möbius function."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
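
# Worked examples for the Möbius function computed above:
#   mobius(4)  -> 0   (4 = 2 * 2 is not square-free)
#   mobius(10) -> 1   (10 = 2 * 5: square-free with an even number of prime factors)
#   mobius(7)  -> -1  (a single prime factor: square-free with an odd count)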
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
snake_case__ : List[str] = 2
class SCREAMING_SNAKE_CASE__ :
def __init__( self , *, # begin keyword-only arguments
A_="<s>" , A_="<pad>" , A_="</s>" , A_="<unk>" , A_=None , )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = bos, unk, pad, eos
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = {}
UpperCamelCase = self.add_symbol(A_ )
UpperCamelCase = self.add_symbol(A_ )
UpperCamelCase = self.add_symbol(A_ )
UpperCamelCase = self.add_symbol(A_ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(A_ )
UpperCamelCase = len(self.symbols )
def __eq__( self , A_ )-> str:
'''simple docstring'''
return self.indices == other.indices
def __getitem__( self , A_ )-> List[Any]:
'''simple docstring'''
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self )-> Union[str, Any]:
'''simple docstring'''
return len(self.symbols )
def __contains__( self , A_ )-> str:
'''simple docstring'''
return sym in self.indices
@classmethod
def UpperCAmelCase_ ( cls , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = cls()
d.add_from_file(A_ )
return d
def UpperCAmelCase_ ( self , A_ , A_=1 , A_=False )-> Union[str, Any]:
'''simple docstring'''
if word in self.indices and not overwrite:
UpperCamelCase = self.indices[word]
UpperCamelCase = self.count[idx] + n
return idx
else:
UpperCamelCase = len(self.symbols )
UpperCamelCase = idx
self.symbols.append(A_ )
self.count.append(A_ )
return idx
def UpperCAmelCase_ ( self , A_ )-> List[str]:
'''simple docstring'''
return 0
def UpperCAmelCase_ ( self , A_ )-> Union[str, Any]:
'''simple docstring'''
if isinstance(A_ , A_ ):
try:
with open(A_ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(A_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(A_ ) )
return
UpperCamelCase = f.readlines()
UpperCamelCase = self._load_meta(A_ )
for line in lines[indices_start_line:]:
try:
UpperCamelCase , UpperCamelCase = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
UpperCamelCase = True
UpperCamelCase , UpperCamelCase = line.rsplit(' ' , 1 )
else:
UpperCamelCase = False
UpperCamelCase = int(A_ )
UpperCamelCase = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(A_ ) )
self.add_symbol(A_ , n=A_ , overwrite=A_ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def A_( A : List[str]):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
UpperCamelCase = dict((re.sub(r'@@$' , '' , A), v) if k.endswith('@@') else (re.sub(r'$' , '</w>' , A), v) for k, v in d.items())
UpperCamelCase = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
UpperCamelCase = d[k] # restore
return da
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f'path {biogpt_checkpoint_path} does not exist!')
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'Writing results to {pytorch_dump_folder_path}')

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, 'checkpoint.pt')
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f'path to the file {checkpoint_file} does not exist!')
    chkpt = torch.load(checkpoint_file, map_location='cpu')

    args = chkpt['cfg']['model']

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, 'dict.txt')
    if not os.path.isfile(dict_file):
        raise ValueError(f'path to the file {dict_file} does not exist!')
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['vocab_file'])
    print(f'Generating {src_vocab_file} of {src_vocab_size} records')
    # indent=2 is an assumption; the original value was lost to obfuscation
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, 'bpecodes')
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f'path to the file {bpecodes_file} does not exist!')

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')
    model_conf = {
        'activation_dropout': args['activation_dropout'],
        'architectures': ['BioGptForCausalLM'],
        'attention_probs_dropout_prob': args['attention_dropout'],
        'bos_token_id': 0,
        'eos_token_id': 2,
        'hidden_act': args['activation_fn'],
        'hidden_dropout_prob': args['dropout'],
        'hidden_size': args['decoder_embed_dim'],
        'initializer_range': 0.02,
        'intermediate_size': args['decoder_ffn_embed_dim'],
        'layer_norm_eps': 1e-12,
        'layerdrop': args['decoder_layerdrop'],
        'max_position_embeddings': args['max_target_positions'],
        'model_type': 'biogpt',
        'num_attention_heads': args['decoder_attention_heads'],
        'num_hidden_layers': args['decoder_layers'],
        'pad_token_id': 1,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_decoder_input_output_embed'],
        'vocab_size': src_vocab_size,
    }

    # good hparam defaults to start with
    print(f'Generating {biogpt_model_config_file}')
    with open(biogpt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config (standard HF filename)
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json')
    tokenizer_conf = {
        'bos_token': '<s>',
        'eos_token': '</s>',
        'model_max_length': 1024,
        'pad_token': '<pad>',
        'special_tokens_map_file': None,
        'tokenizer_class': 'BioGptTokenizer',
        'unk_token': '<unk>',
    }

    print(f'Generating {biogpt_tokenizer_config_file}')
    with open(biogpt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model_state_dict = chkpt['model']

    # remove unneeded keys
    ignore_keys = [
        'decoder.version',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith('output_projection.weight'):
            model_state_dict['output_projection.weight'] = model_state_dict.pop(layer_name)
        else:
            model_state_dict['biogpt.' + layer_name] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save (standard HF weights filename)
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, 'pytorch_model.bin')
    print(f'Generating {pytorch_weights_dump_path}')
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print('Conversion is done!')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--biogpt_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help=(
            'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
            ' bpecodes, etc.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
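
# Illustrative invocation (the script name and checkpoint directory below are
# hypothetical); the dump dir must contain checkpoint.pt, dict.txt and bpecodes,
# as validated above:
#
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path ./Pre-trained-BioGPT \
#       --pytorch_dump_folder_path ./biogpt-converted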
| 707
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='np').input_ids
        labels = tokenizer('Hi I am', return_tensors='np').input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
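
# Note: optax.softmax_cross_entropy returns per-token cross-entropy, so the mean times
# labels.shape[-1] is the summed negative log-likelihood of the target sequence; negating
# it gives the sequence log-likelihood. The variable name suggests the expected value was
# computed with the original Mesh-TensorFlow ("mtf") T5 codebase, hence "mtf_score".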
| 432
| 0
|
import os


def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum from the left column to the right column of the
    matrix in ``filename``, moving up, down and right (Project Euler problem 82)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # first assume a straight move from the left neighbour ...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # ... then relax downwards ...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        # ... and upwards
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
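
# Worked example (hypothetical 2x2 input, not the real input.txt): for
#   [[1, 9],
#    [5, 1]]
# column 0 is initialised to [1, 5]; column 1 first gets [1+9, 5+1] = [10, 6],
# the downward pass keeps 6 (since 10+1 > 6), the upward pass keeps 10 (since 6+9 > 10),
# and the answer is min(10, 6) = 6, i.e. the path 5 -> 1 along the bottom row.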
| 10
|
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0,
        drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False,
        use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02,
        layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['stem'] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 1
|
def merge_sort(collection: list) -> list:
    """Pure Python implementation of the merge sort algorithm.

    :param collection: a mutable ordered collection of comparable items
    :return: the same collection ordered by ascending

    Examples:
    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 390
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 390
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
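
# Note: _LazyModule (here and in the longt5 __init__ above) swaps the package's entry in
# sys.modules for a proxy object, so the heavy torch-backed modeling submodule is only
# imported the first time one of its attributes is actually accessed, keeping a bare
# `import transformers` fast.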
| 707
|
import itertools
import random
import unittest

import numpy as np

from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape (as nested lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 517
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 604
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class _snake_case ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
snake_case__ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def lowercase_ ( ) -> Optional[int]:
'''simple docstring'''
if os.name == "nt":
__lowerCamelCase : str = CursorInfo()
__lowerCamelCase : Union[str, Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_lowerCamelCase , ctypes.byref(_lowerCamelCase ) )
__lowerCamelCase : Tuple = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_lowerCamelCase , ctypes.byref(_lowerCamelCase ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def lowercase_ ( ) -> Any:
'''simple docstring'''
if os.name == "nt":
__lowerCamelCase : List[str] = CursorInfo()
__lowerCamelCase : List[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_lowerCamelCase , ctypes.byref(_lowerCamelCase ) )
__lowerCamelCase : str = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_lowerCamelCase , ctypes.byref(_lowerCamelCase ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
@contextmanager
def lowercase_ ( ) -> List[Any]:
'''simple docstring'''
try:
hide_cursor()
yield
finally:
show_cursor()
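
# Illustrative usage (the render loop and `frames` below are hypothetical): the
# try/finally in hide() guarantees the cursor reappears even if rendering raises.
#
#   with hide():
#       for frame in frames:
#           sys.stdout.write(f"\r{frame}")
#           sys.stdout.flush()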
| 646
| 0
|
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class LoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 716
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2,
        mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True,
        initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True,
        type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths,
            num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # assertEqual (not assertTrue) so the predicted class id is actually compared
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
    'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        use_entity_aware_attention=True, classifier_dropout=None,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 73
|
import numpy as np
import datasets
SCREAMING_SNAKE_CASE : Optional[int] = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
SCREAMING_SNAKE_CASE : Dict = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
SCREAMING_SNAKE_CASE : List[str] = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # fall back to the pseudo-inverse when the covariance matrix is singular
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 635
| 0
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = "▁"
lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938,
            5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427,
            916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597,
            3200, 3129, 1172,
        ]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
# fmt: off
__snake_case : str = {"""input_ids""": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case, model_name="google/bert_for_seq_generation_L-24_bbc_encoder", revision="c817d1fd1be2ffa69431227a1fe320544943d4db", )
| 390
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, embedding_dim: int = 768, ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))
    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None, ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self
    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
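# Added round-trip sketch (not part of the original file; the class name above was
# restored from the matching diffusers module and should be treated as an assumption):
# with the default zero mean and unit std, scale() then unscale() is the identity.
if __name__ == "__main__":
    _normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
    _embeds = torch.randn(2, 4)
    assert torch.allclose(_normalizer.unscale(_normalizer.scale(_embeds)), _embeds)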
| 390
| 1
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1_000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(F'''Perimeter {solution()} has maximum solutions''')
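# Added quick check (not part of the original file): p = 120 is the classic example
# with exactly three integer right-triangle solutions,
# (20, 48, 52), (24, 45, 51) and (30, 40, 50).
if __name__ == "__main__":
    assert pythagorean_triple(120)[120] == 3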
| 467
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0, 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
# after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
| 467
| 1
|
import base64
def base85_encode(string: str) -> bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8"))
def base85_decode(a85encoded: bytes) -> str:
    '''simple docstring'''
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
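# Added round-trip sketch (not part of the original file): Ascii85 encoding followed
# by decoding should return the original text unchanged.
if __name__ == "__main__":
    assert base85_decode(base85_encode("Hello World!")) == "Hello World!"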
| 710
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    '''simple docstring'''
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    '''simple docstring'''
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
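# Added usage note (not part of the original script; the script name and all paths
# below are placeholders): a typical invocation for a fine-tuned checkpoint looks like
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/sew.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --is_finetuned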
| 601
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []
def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"{solution() = }")
| 82
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 448
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
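# Added worked example (not part of the original file): for [1, 2, 3] the best
# non-adjacent choice is 1 + 3 = 4, which the running (include, exclude) pair tracks.
if __name__ == "__main__":
    assert maximum_non_adjacent_sum([1, 2, 3]) == 4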
| 284
| 0
|
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    '''simple docstring'''
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    '''simple docstring'''
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)
def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit
    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f'> atbash_slow(): {timeit("atbash_slow(printable)", setup=setup)} seconds')
    print(f'> atbash(): {timeit("atbash(printable)", setup=setup)} seconds')
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
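# Added property check (not part of the original file): atbash is an involution, so
# applying it twice must return the original text.
if __name__ == "__main__":
    assert atbash(atbash("The quick brown fox.")) == "The quick brown fox."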
| 499
|
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    '''simple docstring'''
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))
    return img.point(contrast)
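# Added worked example (not part of the original file): for level = 170 the factor is
# 259 * (170 + 255) / (255 * (259 - 170)) ≈ 4.85, so a pixel value c = 150 maps to
# int(128 + 4.85 * 22) ≈ 234, stretching values away from the mid-grey point 128.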
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 499
| 1
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
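# Added usage sketch (not part of the original file): 0 marks an open cell and 1 a
# wall; the printed solution matrix marks the path found from (0, 0) to (n-1, n-1).
if __name__ == "__main__":
    solve_maze([[0, 1, 0], [0, 0, 0], [1, 0, 0]])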
| 715
|
import cv2
import numpy as np
class HarrisCorner:
    """simple docstring"""
    def __init__(self, k: float, window_size: int):
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')
    def __str__(self):
        """simple docstring"""
        return str(self.k)
    def detect(self, img_path: str):
        """simple docstring"""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
| 170
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 17
|
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
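# Added worked example (not part of the original file): brightness here is a pure
# shift, so with level = 100 a pixel value c maps to c + 100; for 8-bit images PIL
# builds a lookup table from this function, so out-of-range results are clipped.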
| 428
| 0
|
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    '''simple docstring'''
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    '''simple docstring'''
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    '''simple docstring'''
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    '''simple docstring'''
    alive = 0
    dead = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
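# Added worked example (not part of the original file): a live cell whose 3x3
# neighbourhood is [[True, True, False], [False, True, False], [False, False, False]]
# has 2 live neighbours (the centre cell is excluded by the alive -= 1 above), so it
# survives; with fewer than 2 or more than 3 live neighbours it would die.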
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
# main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['w', 'k'])
try:
while True:
            c = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 713
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    '''simple docstring'''
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    '''simple docstring'''
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
else:
# Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
else:
# Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
# Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
# Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
# Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
# Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
# Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
        step_model(model, input, target, accelerator, False)
opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def test_dataloader_break():
    '''simple docstring'''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main():
    '''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
        test_noop_sync(accelerator)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
        test_distributed_sync(accelerator)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation(split_batch, dispatch_batches)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    '''simple docstring'''
    main()
if __name__ == "__main__":
main()
| 149
| 0
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 479
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """simple docstring"""
    slow_tokenizer_class = CustomTokenizer
    pass
| 479
| 1
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), )
    )
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f'''Optimal value : {minimax(0, 0, True, scores, height)}''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
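# Added worked example (not part of the original file): for the scores above, the
# bottom max layer reduces the leaf pairs to [90, 33, 65, 34423], the min layer to
# [33, 65], and the root max picks 65, so main() prints "Optimal value : 65".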
| 214
|
import re
def dna(dna: str) -> str:
    """simple docstring"""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
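# Added usage sketch (not part of the original file): each base maps to its
# complement (A<->T, C<->G), so "AAACGG" becomes "TTTGCC".
if __name__ == "__main__":
    assert dna("AAACGG") == "TTTGCC"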
| 214
| 1
|
'''simple docstring'''
def actual_power(a: int, b: int):
    '''simple docstring'''
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a: int, b: int) -> float:
    '''simple docstring'''
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
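# Added worked example (not part of the original file): power(-2, -3) returns
# 1 / actual_power(-2, -3); the recursion bottoms out at b == 0 because int(b / 2)
# truncates toward zero, giving 1 / -8 = -0.125, which is what the print above shows.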
| 111
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    """simple docstring"""
    def __init__(self):
        self.data = []
        self.head = 0
        self.tail = 0
    def is_empty(self):
        return self.head == self.tail
    def push(self, data):
        self.data.append(data)
        self.tail = self.tail + 1
    def pop(self):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret
    def count(self):
        return self.tail - self.head
    def print_queue(self):
        print(self.data)
        print('**************')
        print(self.data[self.head : self.tail])
class MyNode:
    """simple docstring"""
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.height = 1
    def get_data(self):
        return self.data
    def get_left(self):
        return self.left
    def get_right(self):
        return self.right
    def get_height(self):
        return self.height
    def set_data(self, data):
        self.data = data
    def set_left(self, node):
        self.left = node
    def set_right(self, node):
        self.right = node
    def set_height(self, height):
        self.height = height
def get_height(node: MyNode | None) -> int:
    """simple docstring"""
    if node is None:
        return 0
    return node.get_height()
def my_max(a: int, b: int) -> int:
    """simple docstring"""
    if a > b:
        return a
    return b
def left_rotation(node: MyNode) -> MyNode:
    """simple docstring"""
    print('left rotation node:', node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def right_rotation(node: MyNode) -> MyNode:
    """simple docstring"""
    print('right rotation node:', node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    """simple docstring"""
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)
def rl_rotation(node: MyNode) -> MyNode:
    """simple docstring"""
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    """simple docstring"""
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root: MyNode) -> Any:
    """simple docstring"""
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def lowercase__ ( __lowercase : MyNode ) -> Any:
"""simple docstring"""
while True:
__UpperCamelCase = root.get_left()
if left_child is None:
break
__UpperCamelCase = left_child
return root.get_data()
def lowercase__ ( __lowercase : MyNode , __lowercase : Any ) -> MyNode | None:
"""simple docstring"""
__UpperCamelCase = root.get_left()
__UpperCamelCase = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
__UpperCamelCase = get_left_most(__lowercase )
root.set_data(__lowercase )
root.set_right(del_node(__lowercase , __lowercase ) )
elif left_child is not None:
__UpperCamelCase = left_child
elif right_child is not None:
__UpperCamelCase = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('No such data' )
return root
else:
root.set_left(del_node(__lowercase , __lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(__lowercase , __lowercase ) )
if get_height(__lowercase ) - get_height(__lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
__UpperCamelCase = left_rotation(__lowercase )
else:
__UpperCamelCase = rl_rotation(__lowercase )
elif get_height(__lowercase ) - get_height(__lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
__UpperCamelCase = right_rotation(__lowercase )
else:
__UpperCamelCase = lr_rotation(__lowercase )
__UpperCamelCase = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(__lowercase )
return root
class snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ):
__UpperCamelCase = None
def _lowerCamelCase ( self : List[Any] ):
return get_height(self.root )
def _lowerCamelCase ( self : Dict , __A : Any ):
print('insert:' + str(__A ) )
__UpperCamelCase = insert_node(self.root , __A )
def _lowerCamelCase ( self : Any , __A : Any ):
print('delete:' + str(__A ) )
if self.root is None:
print('Tree is empty!' )
return
__UpperCamelCase = del_node(self.root , __A )
def __str__( self : Any , ): # a level traversale, gives a more intuitive look on the tree
__UpperCamelCase = ''
__UpperCamelCase = MyQueue()
q.push(self.root )
__UpperCamelCase = self.get_height()
if layer == 0:
return output
__UpperCamelCase = 0
while not q.is_empty():
__UpperCamelCase = q.pop()
__UpperCamelCase = ' ' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(__A )
q.push(__A )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
__UpperCamelCase = cnt + 1
for i in range(1_0_0 ):
if cnt == math.pow(2 , __A ) - 1:
__UpperCamelCase = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def lowercase__ ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
a__ : Optional[int] =AVLtree()
a__ : List[str] =list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
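
# Illustrative check (added for clarity; not part of the original file): with
# the definitions above in scope, sequential inserts should stay balanced. An
# AVL tree of n nodes has height at most about 1.44 * log2(n + 2), so 15
# ascending inserts -- which would give a height-15 chain in a plain BST --
# stay at height 5 or less here.
def _demo_avl_balance() -> None:
    demo_tree = AVLtree()
    for value in range(15):
        demo_tree.insert(value)
    assert demo_tree.get_height() <= 5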
"""simple docstring"""
import os
import sys
import unittest
a_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
a_ = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
a_ = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class snake_case ( unittest.TestCase):
def a_ ( self : str ) -> List[str]:
'''simple docstring'''
_A = get_test_to_tester_mapping(a__ )
_A = get_test_to_tester_mapping(a__ )
_A = {"BertModelTest": "BertModelTester"}
_A = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(a__ ) , a__ )
self.assertEqual(get_test_info.to_json(a__ ) , a__ )
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_A = get_model_to_test_mapping(a__ )
_A = get_model_to_test_mapping(a__ )
_A = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
_A = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(a__ ) , a__ )
self.assertEqual(get_test_info.to_json(a__ ) , a__ )
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
_A = get_model_to_tester_mapping(a__ )
_A = get_model_to_tester_mapping(a__ )
_A = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
_A = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(a__ ) , a__ )
self.assertEqual(get_test_info.to_json(a__ ) , a__ )
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def a__ ( __lowercase ) -> Optional[int]:
_A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a__ ( __lowercase ) -> List[Any]:
_A , _A = emb.weight.shape
_A = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_A = emb.weight.data
return lin_layer
def a__ ( __lowercase , __lowercase="facebook/mbart-large-en-ro" , __lowercase=False , __lowercase=False ) -> List[str]:
_A = torch.load(__lowercase , map_location="cpu" )["model"]
remove_ignore_keys_(__lowercase )
_A = state_dict["encoder.embed_tokens.weight"].shape[0]
_A = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase )
if mbart_aa and finetuned:
_A = "relu"
_A = state_dict["decoder.embed_tokens.weight"]
_A = MBartForConditionalGeneration(__lowercase )
model.model.load_state_dict(__lowercase )
if finetuned:
_A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
a_ = parser.parse_args()
a_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
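
# Example invocation (illustrative; the local paths are placeholders):
#
#     python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-hf \
#         --hf_config facebook/mbart-large-cc25 --finetuned
#
# This loads the fairseq weights, maps them into an MBartForConditionalGeneration,
# and writes the converted model to ./mbart-hf.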
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
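
# Worked example (illustrative; not in the original file): for the degree-1
# curve through (1, 2) and (3, 5), the Bernstein basis at t = 0.5 is
# [0.5, 0.5], so the curve point is the midpoint of the two control points.
#
#     >>> curve = BezierCurve([(1, 2), (3, 5)])
#     >>> curve.basis_function(0.5)
#     [0.5, 0.5]
#     >>> curve.bezier_curve_function(0.5)
#     (2.0, 3.5)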
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        # each get_activation call returns a fresh object, so act2 has no `a`
        with self.assertRaises(AttributeError):
            _ = act2.a
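
# For reference (added; assumes `gelu_python` follows the exact erf-based
# definition GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2)))), e.g. at x = 1.0:
#
#     >>> import math
#     >>> round(0.5 * 1.0 * (1 + math.erf(1.0 / math.sqrt(2.0))), 6)
#     0.841345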
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
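
# Example usage (illustrative): running the command interactively answers the
# prompts built above and writes the result to the chosen path, e.g.
#
#     $ accelerate config --config_file ./my_config.yaml
#     ...
#     accelerate configuration saved at ./my_config.yaml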
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for mBART."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
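
# Illustrative usage (a sketch; assumes network access to the Hugging Face Hub):
#
#     tokenizer = MBartTokenizer.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Plan", return_tensors="pt")
#
# The encoded source sentence is suffixed with [eos, src_lang_code], as set by
# set_src_lang_special_tokens above.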
import argparse
import collections
import os
import re
import tempfile

import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def get_frameworks_table():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)


def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table


def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )


def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
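
# Example invocations (from the repo root, per the comment at the top of the file):
#
#     python utils/update_metadata.py --token <hub-token> --commit_sha <sha>
#     python utils/update_metadata.py --check-only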
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
UpperCamelCase : Any = logging.getLogger(__name__)
@dataclass
class lowerCamelCase__ :
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
@dataclass
class lowerCamelCase__ :
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = None
lowerCAmelCase = None
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = """train"""
lowerCAmelCase = """dev"""
lowerCAmelCase = """test"""
class lowerCamelCase__ :
@staticmethod
def __a ( _lowercase : List[Any] , _lowercase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def __a ( _lowercase : str ):
raise NotImplementedError
@staticmethod
def __a ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : List[str]=False , _lowercase : Optional[int]="[CLS]" , _lowercase : Any=1 , _lowercase : Optional[int]="[SEP]" , _lowercase : str=False , _lowercase : Optional[Any]=False , _lowercase : int=0 , _lowercase : List[str]=0 , _lowercase : Tuple=-100 , _lowercase : str=0 , _lowercase : Any=True , ):
A = {label: i for i, label in enumerate(_lowercase )}
A = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' , _lowercase , len(_lowercase ) )
A = []
A = []
for word, label in zip(example.words , example.labels ):
A = tokenizer.tokenize(_lowercase )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
A = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
A = tokens[: (max_seq_length - special_tokens_count)]
A = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
A = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
A = [cls_token] + tokens
A = [pad_token_label_id] + label_ids
A = [cls_token_segment_id] + segment_ids
A = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
A = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
A = max_seq_length - len(_lowercase )
if pad_on_left:
A = ([pad_token] * padding_length) + input_ids
A = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
A = ([pad_token_segment_id] * padding_length) + segment_ids
A = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' , example.guid )
logger.info('tokens: %s' , ' '.join([str(_lowercase ) for x in tokens] ) )
logger.info('input_ids: %s' , ' '.join([str(_lowercase ) for x in input_ids] ) )
logger.info('input_mask: %s' , ' '.join([str(_lowercase ) for x in input_mask] ) )
logger.info('segment_ids: %s' , ' '.join([str(_lowercase ) for x in segment_ids] ) )
logger.info('label_ids: %s' , ' '.join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
A = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = 42
lowerCAmelCase = nn.CrossEntropyLoss().ignore_index
def __init__( self : Dict , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Tuple=False , _lowercase : Split = Split.train , ):
# Load data features from cache or dataset file
A = os.path.join(
_lowercase , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A = cached_features_file + '.lock'
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
A = torch.load(_lowercase )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
A = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
A = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'Saving features into cached file {cached_features_file}' )
torch.save(self.features , _lowercase )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : str , _lowercase : Optional[Any] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowerCamelCase__ :
lowerCAmelCase = 42
lowerCAmelCase = -100
def __init__( self : Any , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Optional[int]=False , _lowercase : Split = Split.train , ):
A = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
A = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
A = tf.data.Dataset.from_generator(
_lowercase , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) , (
{'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
A = tf.data.Dataset.from_generator(
_lowercase , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) , (
{
'input_ids': tf.TensorShape([None] ),
'attention_mask': tf.TensorShape([None] ),
'token_type_ids': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __a ( self : Tuple ):
A = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Union[str, Any] , _lowercase : Optional[Any] ):
return self.features[i]
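
# Illustrative sketch (not in the original file): how the first-subtoken
# labeling above plays out. If the word "Johanson" with label "B-PER" is
# tokenized into ["Johan", "##son"], only "Johan" keeps the real label id;
# "##son" gets pad_token_label_id (-100) and is ignored by the loss:
#
#     tokens    = ["Johan", "##son"]
#     label_ids = [label_map["B-PER"], -100]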
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"

        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # holds the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of possible binary search trees for `node_count` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of possible binary trees for `node_count` labeled nodes."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
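
# Worked example (illustrative): for 3 nodes, C(6, 3) = 20, so
# catalan_number(3) = 20 // 4 = 5 binary search trees, and with 3! = 6
# labelings there are 5 * 6 = 30 distinct binary trees.
#
#     >>> binomial_coefficient(6, 3)
#     20
#     >>> catalan_number(3)
#     5
#     >>> binary_tree_count(3)
#     30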
"""simple docstring"""
from maths.prime_check import is_prime
def _snake_case ( snake_case__ : int ):
if not isinstance(snake_case__ , snake_case__ ):
A = F'Input value of [number={number}] must be an integer'
raise TypeError(snake_case__ )
if is_prime(snake_case__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
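
# Examples (illustrative): 3 and 5 are twin primes, while 4 is not prime at all.
#
#     >>> twin_prime(3)
#     5
#     >>> twin_prime(4)
#     -1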
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
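
# Illustrative usage (a sketch; assumes the transformers tools runtime is
# available and downloads facebook/bart-large-mnli on first use):
#
#     classifier = TextClassificationTool()
#     classifier("This is a super nice API!", labels=["positive", "negative"])
#     # -> "positive"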
'''simple docstring'''
import heapq
import sys
import numpy as np
snake_case_ = tuple[int, int]

class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # The item is already queued: pop entries until we find it, then
            # push everything back with the new priority.
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
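
# Behaviour sketch of the queue above (illustrative only):
#
#   pq = PriorityQueue()
#   pq.put((0, 0), 5.0)
#   pq.put((1, 1), 2.0)
#   pq.put((0, 0), 1.0)   # re-inserting an item updates its priority
#   pq.get()              # -> (1.0, (0, 0))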

def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # euclidean distance, integer-divided by the time variable t
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
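
# For example, from (0, 0) to (3, 4) consistent_heuristic returns 5.0 and
# heuristic_1 returns 7; heuristic_2 shrinks the euclidean estimate as the
# time counter t grows, which is what makes it inconsistent.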

def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    return g_function[start] + W1 * heuristics[i](start, goal)
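
# key() is the standard weighted-A* priority f(s) = g(s) + W1 * h_i(s, goal);
# with W1 = 1 the anchor queue (i = 0, consistent heuristic) behaves like
# plain A*.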

def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
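
# do_something() is the success handler: it renders the grid with the path
# recovered from back_pointer, prints the path itself, and then terminates
# the whole program via sys.exit().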

def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True

def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
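
# Note the MHA* coupling rule above: a successor improved through the anchor
# queue (open_list[0]) is also offered to an inadmissible queue whenever its
# inadmissible key stays within a factor W2 of the anchor key. As written, the
# loop tests every heuristic index `var` but always pushes into queue `j`.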

def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list

heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]

blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1

def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[TPos] = []
    close_list_inad: list[TPos] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")

if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
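
# Running this module draws the 20x20 grid: '#' marks obstacles and '-' the
# path found by the anchor/inadmissible searches; on success, do_something()
# prints the path and exits, otherwise "No path found to goal" is printed.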
| 355
| 0
|